Example #1
def startMethodAndJoin(servmethod, param, url=None):
    thread = SyncThread()
    thread.callBackURL = url
    thread.startMethod(servmethod, param)
    thread.join()
    connection.close()
    return
Example #2
File: tests.py Project: 10sr/hue
    def run_select_for_update(self, status, nowait=False):
        """
        Utility method that runs a SELECT FOR UPDATE against all
        Person instances. After the select_for_update, it attempts
        to update the name of the only record, save, and commit.

        This function expects to run in a separate thread.
        """
        status.append('started')
        try:
            # We need to enter transaction management again, as this is done on
            # per-thread basis
            transaction.enter_transaction_management()
            people = list(
                Person.objects.all().select_for_update(nowait=nowait)
            )
            people[0].name = 'Fred'
            people[0].save()
            transaction.commit()
        except DatabaseError as e:
            status.append(e)
        finally:
            # This method is run in a separate thread. It uses its own
            # database connection. Close it without waiting for the GC.
            transaction.abort()
            connection.close()
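The docstring above says this helper expects to run in its own thread. A minimal driver sketch, assuming a shared `status` list the test can inspect (the thread handling below is not part of the original test):

import threading

def run_blocking_select(test_case, nowait=False):
    # Shared list the worker appends to; the test inspects it to see
    # whether the SELECT FOR UPDATE started or raised a DatabaseError.
    status = []
    thread = threading.Thread(target=test_case.run_select_for_update,
                              args=(status,), kwargs={'nowait': nowait})
    thread.start()
    thread.join()
    return status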
Example #3
    def handle(self, path_template=None, **options):
        v = int(options.get('verbosity', 0))
        if v == 0:
            logger.level = logging.ERROR
        elif v == 1:
            logger.level = logging.INFO
        elif v >= 2:
            logger.level = logging.DEBUG

        if not path_template:
            path_template = '/srv/ftp/%(repo)s/os/%(arch)s/'
        self.path_template = path_template

        notifier = self.setup_notifier()
        # this thread is done using the database; all future access is done in
        # the spawned read_repo() processes, so close the otherwise completely
        # idle connection.
        connection.close()

        logger.info('Entering notifier loop')
        notifier.loop()

        logger.info('Cancelling remaining threads...')
        for thread in threading.enumerate():
            if hasattr(thread, 'cancel'):
                thread.cancel()
Example #4
def added_mem(request, **kwargs):
    """
    查看自己添加的会员
    """
    template_name = kwargs.pop("template_name")
    right = "store/adder.html"
    UserBasic = kwargs.pop("Mem")
    UserAdder = user_adder.objects.filter(adding = UserBasic)
    from django.db import connection
    cursor = connection.cursor()
    cursor.execute("SELECT	BAS.number , BAS.name , LM.name ,MID.init_money ,  BAS.start_date FROM	`rtyk_triple`.`member_user_adder` AD INNER JOIN `rtyk_triple`.`member_user_mid_mem` MID ON MID.user_id = AD.added_id INNER JOIN `rtyk_triple`.`member_user_basic` BAS ON BAS.id = AD.added_id INNER JOIN `rtyk_triple`.`management_member_lv_money` LM ON LM.id = MID.level_id WHERE unix_timestamp(start_date) BETWEEN unix_timestamp(CURRENT_DATE - INTERVAL 3 MONTH) AND unix_timestamp(CURRENT_DATE)  and AD.adding_id = %s",UserBasic.id)
    adding_mem = cursor.fetchall()
    cursor.close()
    connection.close()
    
    adding_mem = PaginatorFuc(request,adding_mem,number=20)
#    print adding_mem
#    
#    if UserAdder:
#        for UAdder in UserAdder:
#            UserMidMem = user_mid_mem.objects.filter(user = UAdder.added)[0]
#            UAdder.level_name = UserMidMem.level.name
#            UAdder.money = UserMidMem.level.money
    
    ctx = {
        'UserBasic': UserBasic,
        'user_adder': adding_mem,
        'right': right,
        'user_number':UserBasic.number
    }
    
    return render_to_response(template_name,RequestContext(request,ctx))
Example #5
def handle_one(check):
    """ Send an alert for a single check.

    Return True if an appropriate check was selected and processed.
    Return False if no checks need to be processed.

    """
    # Save the new status.
    check.status = check.get_status()
    check.save()

    tmpl = "\nSending alert, status=%s, code=%s\n"
    _stdout(tmpl % (check.status, check.code))

    errors = []  # default if send_alert() raises
    try:
        errors = check.send_alert()
    except:
        agent.record_exception()
    finally:
        connection.close()

    for ch, error in errors:
        _stdout("ERROR: %s %s %s\n" % (ch.kind, ch.value, error))

    return True
Example #6
def version_create(request, package_name):
    """
    The form will create versions according to permissions,
    plugin name and description are updated according to the info
    contained in the package metadata
    """
    plugin = get_object_or_404(Plugin, package_name=package_name)
    if not check_plugin_access(request.user, plugin):
        return render_to_response('plugins/version_permission_deny.html', { 'plugin' : plugin }, context_instance=RequestContext(request))

    version = PluginVersion(plugin = plugin, created_by = request.user)
    if request.method == 'POST':

        form = PluginVersionForm(request.POST, request.FILES, instance=version, is_trusted=request.user.has_perm('plugins.can_approve'))
        if form.is_valid():
            try:
                new_object = form.save()
                msg = _("The Plugin Version has been successfully created.")
                messages.success(request, msg, fail_silently=True)
                # The approved flag is also controlled in the form, but we
                # are checking it here in any case for additional security
                if not request.user.has_perm('plugins.can_approve'):
                    new_object.approved = False
                    new_object.save()
                    messages.warning(request, _('You do not have approval permissions, plugin version has been set unapproved.'), fail_silently=True)
                    version_notify(new_object)
                if form.cleaned_data.get('icon_file'):
                    form.cleaned_data['icon'] = form.cleaned_data.get('icon_file')
                _main_plugin_update(request, new_object.plugin, form)
                _check_optional_metadata(form, request)
                return HttpResponseRedirect(new_object.plugin.get_absolute_url())
            except (IntegrityError, ValidationError, DjangoUnicodeDecodeError), e:
                messages.error(request, e, fail_silently=True)
                connection.close()
            return HttpResponseRedirect(plugin.get_absolute_url())
Example #7
    def handle(self, **options):

        if options['index_type'] == 'feature':
            back = backend.SearchBackend()  # the feature branch needs its own backend instance
            feature_index = site.get_index(Feature)
            features = Feature.objects.filter(published=True)
            back.update(feature_index, features)
        else:
            recipient_index = site.get_index(Recipient)
            location_index = site.get_index(Location)
            
            if options['country']:
                index_data = Recipient.objects.select_related().filter(countrypayment=options['country'], total__gt=1000).only('name', 'geo1', 'geo2', 'geo3', 'geo4', 'zipcode', 'countrypayment')
                locations = Location.objects.filter(country=options['country'])
            else:
                raise ValueError('Country is required')
            
            settings.HAYSTACK_XAPIAN_PATH = "%s-%s" % (settings.HAYSTACK_XAPIAN_PATH, options['country'])
            back = backend.SearchBackend()
            print "now indexing Recipients"
            back.update(recipient_index, index_data)

            print "now indexing Location"
            back.update(location_index, locations)
        connection.close()
Example #8
    def set_many(self):
        usage_key = self._gen_usage_key()
        self.client.get_many(self.client.username, [usage_key])
        self.client.set_many(self.client.username, {usage_key: self._gen_block_data()})
        self.usages_with_data.add(usage_key)
        transaction.commit()
        connection.close()
Example #9
    def test_close(self):
        with self.store_signals() as signals:
            connection.close()
        self.run_query()
        self.assertEqual(len(signals), 2)
        self.assertEqual('pre_close', signals[0][0])
        self.assertEqual('post_close', signals[1][0])
Example #10
def refresh(stream=sys.stdout, dumpfile=None, create=False, quiet=False, yes=None, pgformat=False):
    """Apply a dump file, dropping and creating database."""
    if not dumpfile or not os.path.exists(dumpfile):
        raise CuckooError('You must provide a valid dump file: %s' % dumpfile)

    connection.close()
    cuckoo_db_name = getattr(settings, 'CUCKOO_DB', 'default')
    env = settings.DATABASES[cuckoo_db_name]
    dropcmd = get_db_shell_cmd(cuckoo_db_name, True, True, 'dropdb')
    if database_exists(env['NAME']):
        print '[CUCKOO] Dropping database.'
        checked_call(dropcmd, "could not drop database")
    if create:
        print '[CUCKOO] Creating database.'
        createcmd = get_db_shell_cmd(cuckoo_db_name, False, True, 'createdb')
        checked_call(createcmd, "Could not create database")
    print '[CUCKOO] Applying dump file: %s' % dumpfile
    try:
        if pgformat:
            restorecmd = get_db_shell_cmd(cuckoo_db_name, True, True, 'pg_restore -e -Fc -j 4')
            restorecmd += ' %s' % dumpfile
            print restorecmd
            checked_call(restorecmd, "could not restore database")
        else:
            output = _execute_file(cuckoo_db_name, dumpfile, exists=create, dba=True)
            if not quiet:
                print output
    except Exception as e:
        raise CuckooError("[CUCKOO] Error while executing dump file %s\n %s" % (dumpfile, e))
Example #11
    def handle(self, **options):
        '''The main entry point for the Django management command.'''

        import_start = datetime.datetime.now()
        # Start transaction management.
        transaction.commit_unless_managed()
        transaction.enter_transaction_management()
        transaction.managed(True)
        try:
            self._promote_devel()
        except:
            self._rollback_db()
            raise
        # Finalize the transaction and close the db connection.
        transaction.commit()
        transaction.leave_transaction_management()
        connection.close()
        import_end = datetime.datetime.now()

        # Print a short summary of what we did.
        td = import_end - import_start
        print '\nProcessing complete in %s days, %s.%s seconds.' % (
          td.days, td.seconds, td.microseconds)
        print '  TraitData objects promoted: %s' % (
          PublicTraitData.objects.all().count(),)
Example #12
def error_rate(instrument_id, n_hours=24):
    """
        Returns the rate of errors for the last n_hours hours.
        @param instrument_id: Instrument model object
        @param n_hours: number of hours to track
    """
    # Try calling the stored procedure (faster)
    try:
        cursor = connection.cursor()
        cursor.callproc("error_rate", (instrument_id.id,))
        msg = cursor.fetchone()[0]
        cursor.execute('FETCH ALL IN "%s"' % msg)
        rows = cursor.fetchall()
        cursor.close()
        return [[int(r[0]), int(r[1])] for r in rows]
    except:
        connection.close()
        logging.error("Error rate (%s): %s", str(instrument_id), sys.exc_value)

        # Do it by hand (slow)
        time = timezone.now()
        errors = []
        running_sum = 0
        for i in range(n_hours):
            t_i = time - datetime.timedelta(hours=i + 1)
            n = Error.objects.filter(run_status_id__run_id__instrument_id=instrument_id,
                                     run_status_id__created_on__gte=t_i).count()
            n -= running_sum
            running_sum += n
            errors.append([-i, n])
        return errors
Example #13
def create_test_spatial_db(verbosity=1, autoclobber=False, interactive=False):
    "Creates a spatial database based on the settings."

    # Making sure we're using the SQLite backend.
    if settings.DATABASE_ENGINE != 'sqlite3':
        raise Exception('SpatiaLite database creation only supported on sqlite3 platform.')

    # Getting the test database name using the SQLite backend's
    # `_create_test_db`.  Unless `TEST_DATABASE_NAME` is defined,
    # it returns ":memory:".
    db_name = connection.creation._create_test_db(verbosity, autoclobber)

    # Closing out the current connection to the database originally set
    # in the settings.  This makes it so the `initialize_spatialite`
    # function will be run on the connection for the _test_ database instead.
    connection.close()

    # Point to the new database
    settings.DATABASE_NAME = db_name
    connection.settings_dict["DATABASE_NAME"] = db_name
    can_rollback = connection.creation._rollback_works()
    settings.DATABASE_SUPPORTS_TRANSACTIONS = can_rollback
    connection.settings_dict["DATABASE_SUPPORTS_TRANSACTIONS"] = can_rollback

    # Finally, loading up the SpatiaLite SQL file.
    load_spatialite_sql(db_name, verbosity=verbosity)

    if verbosity >= 1:
        print 'Creation of spatial database %s successful.' % db_name

    # Syncing the database
    call_command('syncdb', verbosity=verbosity, interactive=interactive)
Example #14
def _run(plugin):
    plugin.run_check()
    try:
        return plugin
    finally:
        from django.db import connection
        connection.close()
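Closing the connection in the finally block suggests _run is executed in worker processes. A hedged sketch of a possible driver (the multiprocessing.Pool usage is an assumption, not the project's actual code):

from multiprocessing import Pool

def run_all_checks(plugins, processes=4):
    # Each worker runs _run, which closes its own Django connection
    # before the worker is reused for the next plugin.
    with Pool(processes=processes) as pool:
        return pool.map(_run, plugins)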
Example #15
    def handle(self, *args, **options):
        """Django command handler."""
        self.verbosity = int(options.get('verbosity'))
        self.quiet = options.get('quiet')
        try:
            connection.close()
            self.filepath = options.get('filepath')
            self.backup_extension = options.get('backup_extension') or 'backup'
            self.servername = options.get('servername')
            self.decrypt = options.get('decrypt')
            self.uncompress = options.get('uncompress')
            self.passphrase = options.get('passphrase')
            self.interactive = options.get('interactive')
            self.database = self._get_database(options)
            self.storage = BaseStorage.storage_factory()
            if 'mongo' in self.database['ENGINE']:
                self.dbcommands = MongoDBCommands(self.database)
            else:
                self.dbcommands = DBCommands(self.database)

            if not self.backup_extension:
                self.backup_extension = self.dbcommands.settings.extension or 'backup'
            if options.get('list'):
                return self.list_backups()
            self.restore_backup()
        except StorageError as err:
            raise CommandError(err)
Example #16
def day_wise_logins(request):
    if request.user.is_superuser:
        from django.db import connection
        cursor = connection.cursor()
        cursor.execute("select count(*) from auth_user where datediff(date(now()),date(last_login)) > 7;")
        inactive_users_week = cursor.fetchall()[0][0]
        cursor.execute("select count(*) from auth_user where datediff(date(now()),date(last_login)) > 14;")
        inactive_users_2week = cursor.fetchall()[0][0]
        cursor.execute("select count(*) from auth_user where datediff(date(now()),date(last_login)) > 21;")
        inactive_users_3week = cursor.fetchall()[0][0]
        cursor.execute("select count(*) from auth_user where datediff(date(now()),date(last_login)) > 28;")
        inactive_users_4week = cursor.fetchall()[0][0]
        cursor.execute("select count(*) from auth_user where datediff(date(now()),date(last_login)) > 56;")
        inactive_users_8week = cursor.fetchall()[0][0]
        cursor.execute("select count(*) from auth_user where datediff(date(now()),date(last_login)) > 84;")
        inactive_users_12week = cursor.fetchall()[0][0]
        cursor.execute("select count(*) from auth_user where last_login=date_joined and is_active=1;")
        one_time_loggers = cursor.fetchall()[0][0]
        cursor.execute("select count(*) from auth_user where date(last_login)=date(date_joined) and is_active=1;")
        one_day_loggers = cursor.fetchall()[0][0]
        connection.close()
        return render_to_response("console/daily_logins.html", {'one_time_loggers':one_time_loggers,'one_day_loggers':one_day_loggers,'inactive_users_week':inactive_users_week,'inactive_users_2week':
                                                                inactive_users_2week,'inactive_users_3week':inactive_users_3week, 'inactive_users_4week':inactive_users_4week,
                                                                'inactive_users_8week':inactive_users_8week,'inactive_users_12week':inactive_users_12week})
    else:
        return HttpResponseRedirect('/'+request.user.username+'/')	        
Example #17
def create_test_spatial_db(verbosity=1, autoclobber=False, interactive=False):
    "Creates a test spatial database based on the settings."

    # Making sure we're using PostgreSQL and psycopg2
    if settings.DATABASE_ENGINE != "postgresql_psycopg2":
        raise Exception("Spatial database creation only supported postgresql_psycopg2 platform.")

    # Getting the spatial database name
    db_name = get_spatial_db(test=True)
    _create_with_cursor(db_name, verbosity=verbosity, autoclobber=autoclobber)

    # If a template database is used, then don't need to do any of the following.
    if not hasattr(settings, "POSTGIS_TEMPLATE"):
        # Creating the db language; this does not need to be done on NT platforms
        # since the PostGIS installer enables this capability.
        if os.name != "nt":
            create_lang(db_name, verbosity=verbosity)

        # Now adding in the PostGIS routines.
        load_postgis_sql(db_name, verbosity=verbosity)

    if verbosity >= 1:
        print "Creation of spatial database %s successful." % db_name

    # Closing the connection
    connection.close()
    settings.DATABASE_NAME = db_name
    connection.settings_dict["DATABASE_NAME"] = db_name
    can_rollback = connection.creation._rollback_works()
    settings.DATABASE_SUPPORTS_TRANSACTIONS = can_rollback
    connection.settings_dict["DATABASE_SUPPORTS_TRANSACTIONS"] = can_rollback

    # Syncing the database
    call_command("syncdb", verbosity=verbosity, interactive=interactive)
Example #18
def _send_bulk(emails, uses_multiprocessing=True, log_level=None):
    # Multiprocessing does not play well with database connection
    # Fix: Close connections on forking process
    # https://groups.google.com/forum/#!topic/django-users/eCAIY9DAfG0
    if uses_multiprocessing:
        db_connection.close()

    if log_level is None:
        log_level = get_log_level()

    sent_count, failed_count = 0, 0
    email_count = len(emails)
    logger.info('Process started, sending %s emails' % email_count)

    try:
        for email in emails:
            status = email.dispatch(log_level=log_level,
                                    disconnect_after_delivery=False)
            if status == STATUS.sent:
                sent_count += 1
                logger.debug('Successfully sent email #%d' % email.id)
            else:
                failed_count += 1
                logger.debug('Failed to send email #%d' % email.id)
    except Exception as e:
        logger.error(e, exc_info=sys.exc_info(), extra={'status_code': 500})

    connections.close()

    logger.info('Process finished, %s attempted, %s sent, %s failed' %
                (email_count, sent_count, failed_count))

    return (sent_count, failed_count)
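The linked thread explains that a forked worker inherits the parent's database socket, which is why _send_bulk closes it up front. A minimal sketch of a parent process dispatching batches (the pool driver and batch splitting are assumptions):

from multiprocessing import Pool
from django.db import connection

def send_batches_in_parallel(email_batches, processes=2):
    # Close the parent's connection before forking so no child inherits
    # an open socket; each child reopens its own connection lazily.
    connection.close()
    with Pool(processes=processes) as pool:
        return pool.map(_send_bulk, email_batches)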
Example #19
def feature_new_users(request):
    # criteria for featuring would be on desc order of comments, except team but comment count > 100
    from django.db import connection
    cursor = connection.cursor()
    cursor.execute("select count(*) as cnt ,user_id from django_comments where user_id not in (3,4,6,53,1571,465) group by user_id order by cnt desc limit 50;")
    active_users = cursor.fetchall()
    connection.close()
    featured_users = Featured_User.objects.all()
    count = 3
    now = datetime.datetime.now()
    date = now.date()-datetime.timedelta(99)
    for user in active_users:
        #comment count should be greater than 100
        if int(user[0])>15 and count > 0:
            usr = get_object_or_404(User,id=int(user[1]))
            if not Featured_User.objects.filter(user=usr):
                feat_user = Featured_User(user = usr)
                feat_user.save()
                count = count -1
                params_for_mail = {'#_1':usr.username}
                send_mail(str(usr.email),'Kwippy <*****@*****.**>','featured_user',params_for_mail)
                
    #delete featured users who were featured 99 days back so that they can be featured again.
    old_feat_users = Featured_User.objects.filter(created_at__lte=date)
    old_feat_users.delete()
    featured_users = Featured_User.objects.all()
    return render_to_response("console/featured_users.html",{'featured_users':featured_users})
def create_products(queue):
    """
    Download an image from Flickr for the product on the queue and if
    successful now or previously, create the applicable product records.
    """

    # Close the connection for this process to avoid the issue discussed here:
    # http://groups.google.com/group/django-users/
    # browse_thread/thread/2c7421cdb9b99e48
    connection.close()
    product_options = ProductOption.objects.as_fields()
    while True:

        # Get next set of data from queue.
        data = queue.get()
        if data is None:
            break
        main_category, sub_category, product = data[0], data[1], data[-1]

        # Try and download a product image from Flickr.
        image = join(image_dir, "%s.jpg" % product)
        if exists(image):
            message = "Using already downloaded image for %s" % data
        else:
            try:
                images = flickr.photos_search(tags=[product], per_page=1)
                if not images:
                    raise Exception("No images found")
                url = images[0].getURL(size="Large", urlType="source")
                urlretrieve(url, image)
            except Exception, e:
                message = "Error [%s] for %s" % (e, data)
            else:
                message = "Successfully downloaded image for %s" % data
Example #21
def create_spatial_db(test=False, verbosity=1, autoclobber=False, interactive=False):
    "Creates a spatial database based on the settings."

    # Making sure we're using PostgreSQL and psycopg2
    if settings.DATABASE_ENGINE != 'postgresql_psycopg2':
        raise Exception('Spatial database creation is only supported on the postgresql_psycopg2 platform.')

    # Getting the spatial database name
    if test:
        db_name = get_spatial_db(test=True)
        _create_with_cursor(db_name, verbosity=verbosity, autoclobber=autoclobber)
    else:
        db_name = get_spatial_db()
        _create_with_shell(db_name, verbosity=verbosity, autoclobber=autoclobber)

    # Creating the db language; this does not need to be done on NT platforms
    # since the PostGIS installer enables this capability.
    if os.name != 'nt':
        create_lang(db_name, verbosity=verbosity)

    # Now adding in the PostGIS routines.
    load_postgis_sql(db_name, verbosity=verbosity)

    if verbosity >= 1: print 'Creation of spatial database %s successful.' % db_name

    # Closing the connection
    connection.close()
    settings.DATABASE_NAME = db_name

    # Syncing the database
    call_command('syncdb', verbosity=verbosity, interactive=interactive)
Example #22
def loadCart(username):
    cursor = connection.cursor()
    sql = "SELECT id FROM auth_user WHERE username = %s"
    user_id = cursor.execute(sql,[username]).fetchone()[0]
    sql = "SELECT cart_id FROM cart WHERE user_id = %s"
    cart_id = cursor.execute(sql,[user_id]).fetchone()
    if cart_id is None :
        return 0
    else:
        cart_id = cart_id[0]
        sql = "SELECT product_id,name,price,image FROM product " \
              "WHERE product_id in " \
              "(SELECT product_id FROM cart_item WHERE cart_id = %s)"
        re = cursor.execute(sql,[cart_id]).fetchall()
        data  = []
        i = 0
        for row in re:
            sql = "SELECT amount FROM cart_item WHERE cart_id = %s AND product_id = %s"
            amount = cursor.execute(sql,[cart_id,row[0]]).fetchone()[0]
            sql = "SELECT stock_amount FROM product WHERE product_id = %s"
            cur_amount = cursor.execute(sql,[row[0]]).fetchone()[0]
            if amount > cur_amount:
                amount = cur_amount
            data.append(str(row[0]) + SPLIT_SBAR + row[1] + SPLIT_SBAR + str(row[2]) + SPLIT_SBAR + row[3] + SPLIT_SBAR + str(amount) + SPLIT_LBAR)
            i = i + 1
        data.append('&' + str(i))
        connection.close()
        return data
Example #23
    def do_db(self, callback, **args):
        try:
            return callback(**args)
        except (OperationalError, InterfaceError) as e:  # Connection has gone away
            self.logger.warn("%s, reconnecting" % e)  # TODO
            connection.close()
            return callback(**args)
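A usage sketch for do_db (the Task model and fetch_task name are hypothetical): any ORM call routed through it is retried once on a fresh connection if MySQL has dropped the old one.

    def fetch_task(self, task_id):
        # Task is a placeholder model; do_db closes the stale connection
        # and repeats the query if the first attempt hits a gone-away error.
        return self.do_db(Task.objects.get, pk=task_id)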
Example #24
    def run(self):
        while True:
            try:
                if self.exit:
                    return
                if self.paused:
                    self.sleep()
                    continue

                if not self.last_check:
                    if not self.queue_running():
                        self.sleep()
                        continue
                else:
                    diff = timezone.now() - self.last_check
                    if diff.total_seconds() > 30:
                        if not self.queue_running():
                            self.sleep()
                            continue

                self.assign_download()
                self.check_finished()
                self.extract()
            except OperationalError as e:
                connection.close()
                logger.info("Resetting mysql connection due to %s" % str(e))
            except Exception as e:
                logger.exception("Some error in main thread %s" % str(e))

            self.sleep()
Example #25
    def run(self):

        while True:

            if self.abort:
                return

            last_run = cache.get("maintenance_run")

            if None is last_run:
                self.do_maintenance()
                continue

            now = datetime.now()
            diff = now - last_run
            hours = diff.total_seconds() / 60 / 60

            if hours >= 24:
                try:
                    self.do_maintenance()
                except OperationalError as e:
                    connection.close()
                    logger.info("Resetting mysql connection due to %s" % str(e))
                except Exception as e:
                        logger.exception("Error in extractor thread %s" % str(e))

            self.sleep()
Example #26
def _send_messages(email_messages):
    connection = _get_real_backend()

    # Create messages in the database for correct
    # tracking of their status.
    email_models = [models.Message.from_email_message(email, save=True)
                    for email in email_messages]

    # Open a connection for sending all messages
    connection.open()
    sended_counter = 0

    for email, model_instance in zip(email_messages, email_models):
        if hasattr(email, "priority"):
            if email.priority <= models.PRIORITY_LOW:
                model_instance.priority = email.priority
                model_instance.status = models.STATUS_PENDING
                model_instance.save()

                continue

        sended = connection.send_messages([email])

        if sended == 1:
            sended_counter += 1
            model_instance.status = models.STATUS_SENT
            model_instance.sent_at = timezone.now()
        else:
            model_instance.status = models.STATUS_FAILED

        model_instance.save()

    connection.close()
    return sended_counter
Example #27
def _retry_send_messages():
    """
    Retry sending failed messages.
    """

    max_retry_value = getattr(settings, "DJMAIL_MAX_RETRY_NUMBER", 3)
    queryset = models.Message.objects.filter(status=models.STATUS_FAILED)\
                        .filter(retry_count__lte=max_retry_value)\
                        .order_by("-priority", "created_at")

    connection = _get_real_backend()
    paginator = Paginator(list(queryset), getattr(settings, "DJMAIL_MAX_BULK_RETRY_SEND", 10))

    for page_index in paginator.page_range:
        connection.open()
        for message_model in paginator.page(page_index).object_list:
            email = message_model.get_email_message()
            sended = connection.send_messages([email])

            if sended == 1:
                message_model.status = models.STATUS_SENT
                message_model.sent_at = timezone.now()
            else:
                message_model.retry_count += 1

            message_model.save()

        connection.close()
Example #28
def get_count(request, model, refresh, processor, context):
    opts = model._meta

    # Build a queryset through the context which is toggled by
    # the parameter.
    processor = processor(context=context, tree=model)
    queryset = processor.get_queryset(request=request)

    # Get count from cache or database
    label = ':'.join([opts.app_label, opts.module_name, 'count'])
    key = cache_key(label, kwargs={'queryset': queryset})

    cache = get_cache(avocado_settings.DATA_CACHE)

    if refresh:
        count = None
    else:
        count = cache.get(key)

    if count is None:
        count = queryset.values('pk').distinct().count()
        cache.set(key, count, timeout=NEVER_EXPIRE)

    # Close the connection in the thread to prevent 'idle in transaction'
    # situations.
    from django.db import connection
    connection.close()

    return count
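The closing comment implies get_count runs off the request thread. A hedged sketch of such a caller (the threading wrapper is an assumption, not part of the project):

import threading

def count_in_background(request, model, processor, context):
    # Compute the count in a worker thread; get_count closes that
    # thread's own connection, avoiding 'idle in transaction'.
    worker = threading.Thread(target=get_count,
                              args=(request, model, False, processor, context))
    worker.start()
    return worker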
Example #29
def addToFav(user_id,pid):
    cursor = connection.cursor()
    connection.close()
    if checkFavExist(user_id) is True:
        updateFav(user_id,pid)
    else:
        insertFav(user_id,pid)
Example #30
def set_distribution(ldamodel):
    """Transformacion para dejar los documentos como listas de los word id"""

    insert = """INSERT INTO application_documentdistribution (document_id, ldamodel_id, distribution) (
    SELECT 
        D.id, 
        SL.ldamodel_id, 
        group_concat(REPEAT(CONCAT(HEX(F.word_id),','),F.frequency) separator '') as distribution
    FROM application_frequency F
    JOIN application_wordldamodel W ON F.word_id = W.word_id AND W.ldamodel_id = """+str(ldamodel.id)+"""
    JOIN application_document D ON F.document_id = D.id
    JOIN application_dataset S ON S.id = D.dataset_id
    JOIN application_datasetldamodel SL ON S.id = SL.dataset_id
    WHERE SL.ldamodel_id = """+str(ldamodel.id)+"""
    GROUP BY D.id
    );"""

    print insert

    cursor = connection.cursor()
    cursor.execute("SET GLOBAL group_concat_max_len = 5000000;")
    cursor.execute("COMMIT")
    cursor.execute("DELETE FROM application_documentdistribution WHERE ldamodel_id = %s" % str(ldamodel.id))
    cursor.execute("COMMIT")
    cursor.execute(insert)
    cursor.execute("COMMIT")
    connection.close()

    print "  Distribuciones Asignadas..."

    return HttpResponse("Set distribution completed")
Example #31
    def perform_work(self, body):
        try:
            flush = body.get('event') == 'FLUSH'
            if flush:
                self.last_event = ''
            if not flush:
                event_map = {
                    'job_id': JobEvent,
                    'ad_hoc_command_id': AdHocCommandEvent,
                    'project_update_id': ProjectUpdateEvent,
                    'inventory_update_id': InventoryUpdateEvent,
                    'system_job_id': SystemJobEvent,
                }

                job_identifier = 'unknown job'
                for key, cls in event_map.items():
                    if key in body:
                        job_identifier = body[key]
                        break

                self.last_event = f'\n\t- {cls.__name__} for #{job_identifier} ({body.get("event", "")} {body.get("uuid", "")})'  # noqa

                if body.get('event') == 'EOF':
                    try:
                        final_counter = body.get('final_counter', 0)
                        logger.info(
                            'Event processing is finished for Job {}, sending notifications'
                            .format(job_identifier))
                        # EOF events are sent when stdout for the running task is
                        # closed. don't actually persist them to the database; we
                        # just use them to report `summary` websocket events as an
                        # approximation for when a job is "done"
                        emit_channel_notification(
                            'jobs-summary',
                            dict(group_name='jobs',
                                 unified_job_id=job_identifier,
                                 final_counter=final_counter))
                        # Additionally, when we've processed all events, we should
                        # have all the data we need to send out success/failure
                        # notification templates
                        uj = UnifiedJob.objects.get(pk=job_identifier)

                        if isinstance(uj, Job):
                            # *actual playbooks* send their success/failure
                            # notifications in response to the playbook_on_stats
                            # event handling code in main.models.events
                            pass
                        elif hasattr(uj, 'send_notification_templates'):
                            handle_success_and_failure_notifications.apply_async(
                                [uj.id])
                    except Exception:
                        logger.exception(
                            'Worker failed to emit notifications: Job {}'.
                            format(job_identifier))
                    return

                event = cls.create_from_data(**body)
                self.buff.setdefault(cls, []).append(event)

            retries = 0
            while retries <= self.MAX_RETRIES:
                try:
                    self.flush(force=flush)
                    break
                except (OperationalError, InterfaceError, InternalError):
                    if retries >= self.MAX_RETRIES:
                        logger.exception(
                            'Worker could not re-establish database connectivity, giving up on one or more events.'
                        )
                        return
                    delay = 60 * retries
                    logger.exception(
                        'Database Error Saving Job Event, retry #{i} in {delay} seconds:'
                        .format(i=retries + 1, delay=delay))
                    django_connection.close()
                    time.sleep(delay)
                    retries += 1
                except DatabaseError:
                    logger.exception('Database Error Saving Job Event')
                    break
        except Exception as exc:
            tb = traceback.format_exc()
            logger.error('Callback Task Processor Raised Exception: %r', exc)
            logger.error('Detail: {}'.format(tb))
Example #32
File: tests.py Project: zw3n/django
def update_birthday_slowly():
    Person.objects.update_or_create(
        first_name='John', defaults={'birthday': birthday_sleep})
    # Avoid leaking connection for Oracle
    connection.close()
Example #33
def _send_bulk(emails, uses_multiprocessing=True, log_level=None):
    # Multiprocessing does not play well with database connection
    # Fix: Close connections on forking process
    # https://groups.google.com/forum/#!topic/django-users/eCAIY9DAfG0
    if uses_multiprocessing:
        db_connection.close()

    if log_level is None:
        log_level = get_log_level()

    sent_emails = []
    failed_emails = []  # This is a list of two tuples (email, exception)
    email_count = len(emails)

    logger.info('Process started, sending %s emails' % email_count)

    def send(email):
        try:
            email.dispatch(log_level=log_level, commit=False)
            sent_emails.append(email)
            logger.debug('Successfully sent email #%d' % email.id)
        except Exception as e:
            logger.debug('Failed to send email #%d' % email.id)
            failed_emails.append((email, e))

    # Prepare emails before we send these to threads for sending
    # So we don't need to access the DB from within threads
    for email in emails:
        # Sometimes this can fail, for example when trying to render
        # email from a faulty Django template
        try:
            email.prepare_email_message()
        except Exception as e:
            failed_emails.append((email, e))

    number_of_threads = min(get_threads_per_process(), email_count)
    pool = ThreadPool(number_of_threads)

    pool.map(send, emails)
    pool.close()
    pool.join()

    connections.close()

    # Update statuses of sent and failed emails
    email_ids = [email.id for email in sent_emails]
    OutgoingEmail.objects.filter(id__in=email_ids).update(status=STATUS.sent)

    email_ids = [email.id for (email, e) in failed_emails]
    OutgoingEmail.objects.filter(id__in=email_ids).update(status=STATUS.failed)

    # If log level is 0, log nothing, 1 logs only sending failures
    # and 2 means log both successes and failures
    if log_level >= 1:

        logs = []
        for (email, exception) in failed_emails:
            logs.append(
                Log(email=email, status=STATUS.failed,
                    message=str(exception),
                    exception_type=type(exception).__name__)
            )

        if logs:
            Log.objects.bulk_create(logs)

    if log_level == 2:

        logs = []
        for email in sent_emails:
            logs.append(Log(email=email, status=STATUS.sent))

        if logs:
            Log.objects.bulk_create(logs)

    logger.info(
        'Process finished, %s attempted, %s sent, %s failed' % (
            email_count, len(sent_emails), len(failed_emails)
        )
    )

    return len(sent_emails), len(failed_emails)
Example #34
def release_connection():
    """
    Close the thread's connection to the database and increment the database resource count.
    """
    connection.close()
    conn_sem.release()
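conn_sem is presumably a semaphore capping concurrent database connections. A minimal sketch of the acquiring side, assuming that setup (the limit of 10 is made up):

import threading

# Assumed setup: at most 10 threads hold a database connection at once.
conn_sem = threading.BoundedSemaphore(value=10)

def acquire_connection():
    # Block until a slot is free; Django opens the actual connection
    # lazily on the thread's first query.
    conn_sem.acquire()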
Example #35
    def main_loop(self, options):
        last_schedule = last_dispatcher_check = time.time()

        while True:
            try:
                try:
                    # Compute the timeout
                    now = time.time()
                    timeout = min(
                        SCHEDULE_INTERVAL - (now - last_schedule),
                        PING_INTERVAL - (now - last_dispatcher_check),
                    )
                    # If some actions are remaining, decrease the timeout
                    if any([self.events[k] for k in self.events.keys()]):
                        timeout = min(timeout, 2)
                    # Wait at least for 1ms
                    timeout = max(timeout * 1000, 1)

                    # Wait for data or a timeout
                    sockets = dict(self.poller.poll(timeout))
                except zmq.error.ZMQError:
                    continue

                if sockets.get(self.pipe_r) == zmq.POLLIN:
                    self.logger.info("[POLL] Received a signal, leaving")
                    break

                # Command socket
                if sockets.get(self.controler) == zmq.POLLIN:
                    # Unqueue all pending messages
                    while self.controler_socket():
                        pass

                # Events socket
                if sockets.get(self.event_socket) == zmq.POLLIN:
                    self.logger.info("[EVENT] handling events")
                    # Unqueue all pending messages
                    while self.read_event_socket():
                        pass
                    # Wait for the next iteration to handle the event.
                    # In fact, the code that generated the event (lava-logs or
                    # lava-server-gunicorn) needs some time to commit the
                    # database transaction.
                    # If we are too fast, the database object won't be
                    # available (or in the right state) yet.
                    continue

                # Inotify socket
                if sockets.get(self.inotify_fd) == zmq.POLLIN:
                    os.read(self.inotify_fd, 4096)
                    if self.auth is not None:
                        self.logger.info(
                            "[AUTH] Reloading certificates from %s",
                            options["slaves_certs"],
                        )
                        self.auth.configure_curve(
                            domain="*", location=options["slaves_certs"])
                    else:
                        self.logger.error(
                            "[AUTH] New certificates in %s but encryption is disabled",
                            options["slaves_certs"],
                        )

                # Check dispatchers status
                now = time.time()
                if now - last_dispatcher_check > PING_INTERVAL:
                    for hostname, dispatcher in self.dispatchers.items():
                        if (dispatcher.online and now - dispatcher.last_msg >
                                DISPATCHER_TIMEOUT):
                            if hostname == "lava-logs":
                                self.logger.error(
                                    "[STATE] lava-logs goes OFFLINE")
                            else:
                                self.logger.error(
                                    "[STATE] Dispatcher <%s> goes OFFLINE",
                                    hostname)
                            self.dispatchers[hostname].go_offline()
                    last_dispatcher_check = now

                # Limit accesses to the database. This will also limit the rate of
                # CANCEL and START messages
                if time.time() - last_schedule > SCHEDULE_INTERVAL:
                    if self.dispatchers["lava-logs"].online:
                        schedule(self.logger)

                        # Dispatch scheduled jobs
                        with transaction.atomic():
                            self.start_jobs()
                    else:
                        self.logger.warning(
                            "lava-logs is offline: can't schedule jobs")

                    # Handle canceling jobs
                    with transaction.atomic():
                        self.cancel_jobs()

                    # Do not count the time taken to schedule jobs
                    last_schedule = time.time()
                else:
                    # Cancel the jobs and remove the jobs from the set
                    if self.events["canceling"]:
                        with transaction.atomic():
                            self.cancel_jobs(partial=True)
                        self.events["canceling"] = set()
                    # Schedule for available device-types
                    if self.events["available_dt"]:
                        jobs = schedule(self.logger,
                                        self.events["available_dt"])
                        self.events["available_dt"] = set()
                        # Dispatch scheduled jobs
                        with transaction.atomic():
                            self.start_jobs(jobs)

            except (OperationalError, InterfaceError):
                self.logger.info("[RESET] database connection reset.")
                # Closing the database connection will force Django to reopen
                # the connection
                connection.close()
                time.sleep(2)
Example #36
def query(request):
    instance_name = request.POST.get('instance_name')
    sql_content = request.POST.get('sql_content')
    db_name = request.POST.get('db_name')
    limit_num = request.POST.get('limit_num')

    result = {'status': 0, 'msg': 'ok', 'data': {}}
    try:
        instance = Instance.objects.get(instance_name=instance_name)
    except Instance.DoesNotExist:
        result['status'] = 1
        result['msg'] = '实例不存在'
        return HttpResponse(json.dumps(result), content_type='application/json')

    # Server-side parameter validation
    if sql_content is None or db_name is None or instance_name is None or limit_num is None:
        result['status'] = 1
        result['msg'] = '页面提交参数可能为空'
        return HttpResponse(json.dumps(result),
                            content_type='application/json')

    sql_content = sql_content.strip()

    # Get the user information
    user = request.user

    # Filter out comment statements and non-query statements
    sql_content = ''.join(
        map(
            lambda x: re.compile(r'(^--\s+.*|^/\*.*\*/;\s*$)').sub(
                '', x, count=1), sql_content.splitlines(1))).strip()
    # Remove blank lines
    sql_content = re.sub('[\r\n\f]{2,}', '\n', sql_content)

    # Syntax check
    sql_list = sqlparse.split(sql_content)
    for sql in sql_list:
        if re.match(r"^select|^show|^explain", sql.lower()):
            break
        else:
            result['status'] = 1
            result['msg'] = '仅支持^select|^show|^explain语法,请联系管理员!'
            return HttpResponse(json.dumps(result),
                                content_type='application/json')

    # Execute only the first valid SQL statement
    sql_content = sql_list[0].rstrip(';')

    try:
        # Query privilege check
        priv_check_info = query_priv_check(user, instance_name, db_name,
                                           sql_content, limit_num)
        if priv_check_info['status'] == 0:
            limit_num = priv_check_info['data']['limit_num']
            priv_check = priv_check_info['data']['priv_check']
        else:
            return HttpResponse(json.dumps(priv_check_info),
                                content_type='application/json')
        limit_num = 0 if re.match(r"^explain",
                                  sql_content.lower()) else limit_num

        # Query check
        query_engine = get_engine(instance=instance)
        filter_result = query_engine.query_check(db_name=db_name,
                                                 sql=sql_content,
                                                 limit_num=limit_num)
        if filter_result.get(
                'bad_query') and SysConfig().get('disable_star') is True:
            result['status'] = 1
            result['msg'] = filter_result.get('msg')
            return HttpResponse(json.dumps(result),
                                content_type='application/json')
        else:
            sql_content = filter_result['filtered_sql']
        sql_content = sql_content + ';'

        # Execute the query and measure the execution time
        t_start = time.time()
        query_result = query_engine.query(db_name=str(db_name),
                                          sql=sql_content,
                                          limit_num=limit_num)
        t_end = time.time()
        query_result.query_time = "%5s" % "{:.4f}".format(t_end - t_start)

        # Data masking. Also check the config: is masking enabled, and may execution continue if syntax-tree parsing fails
        # Whether the query hit a masking rule: 0 unknown, 1 hit, 2 miss
        hit_rule = 0 if re.match(r"^select", sql_content.lower()) else 2
        masking = 2  # Whether the result was properly masked: 1 yes, 2 no
        t_start = time.time()
        # Only mask statements that queried successfully
        if SysConfig().get('data_masking') and re.match(
                r"^select",
                sql_content.lower()) and query_result.error is None:
            try:
                query_result = query_engine.query_masking(
                    db_name=db_name, sql=sql_content, resultset=query_result)
                if SysConfig().get(
                        'query_check') and query_result.is_critical is True:
                    masking_result = {
                        'status': query_result.status,
                        'msg': query_result.error,
                        'data': query_result.__dict__
                    }
                    return HttpResponse(json.dumps(masking_result),
                                        content_type='application/json')
                else:
                    # No actual rule hit, so show the result as not masked
                    if query_result.is_masked:
                        masking = 1
                        hit_rule = 1
            except Exception:
                logger.error(traceback.format_exc())
                # Error occurred: not masked, no rule hit
                hit_rule = 2
                masking = 2
                if SysConfig().get('query_check'):
                    result['status'] = 1
                    result['msg'] = '脱敏数据报错,请联系管理员'
                    return HttpResponse(json.dumps(result),
                                        content_type='application/json')

        t_end = time.time()
        query_result.mask_time = "%5s" % "{:.4f}".format(t_end - t_start)
        sql_result = query_result.__dict__

        result['data'] = sql_result

        # Store a record of successful queries in the database
        if sql_result.get('error'):
            pass
        else:
            query_log = QueryLog()
            query_log.username = user.username
            query_log.user_display = user.display
            query_log.db_name = db_name
            query_log.instance_name = instance_name
            query_log.sqllog = sql_content
            if int(limit_num) == 0:
                limit_num = int(sql_result['affected_rows'])
            else:
                limit_num = min(int(limit_num),
                                int(sql_result['affected_rows']))
            query_log.effect_row = limit_num
            query_log.cost_time = query_result.query_time
            query_log.priv_check = priv_check
            query_log.hit_rule = hit_rule
            query_log.masking = masking
            # Guard against query timeouts
            try:
                query_log.save()
            except:
                connection.close()
                query_log.save()
    except Exception as e:
        logger.error(traceback.format_exc())
        result['status'] = 1
        result['msg'] = str(e)

    # Return the query result
    try:
        return HttpResponse(json.dumps(result,
                                       cls=ExtendJSONEncoder,
                                       bigint_as_string=True),
                            content_type='application/json')
    except Exception:
        return HttpResponse(json.dumps(result,
                                       default=str,
                                       bigint_as_string=True,
                                       encoding='latin1'),
                            content_type='application/json')
Example #37
    def wrapper(*args, **kwargs):
        try:
            connection.connection.ping()
        except Exception:
            connection.close()
        return func(*args, **kwargs)
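This wrapper is presumably the inner function of a reconnect decorator. A sketch of what the enclosing decorator might look like (the ensure_db_connection name is an assumption):

import functools
from django.db import connection

def ensure_db_connection(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Ping the raw DB-API connection; if it has gone away (or was
        # never opened), close it so the next ORM call reconnects.
        try:
            connection.connection.ping()
        except Exception:
            connection.close()
        return func(*args, **kwargs)
    return wrapper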
Example #38
    def execute_workflow(self, manual=False):
        """执行上线单"""
        workflow_detail = self.workflow
        if workflow_detail.is_manual == 1:
            return self.execute(db_name=workflow_detail.db_name, sql=workflow_detail.sql_content)
        execute_result = ReviewSet(full_sql=workflow_detail.sql_content)
        inception_engine = InceptionEngine()
        if workflow_detail.is_backup == '是':
            str_backup = "--enable-remote-backup;"
        else:
            str_backup = "--disable-remote-backup;"
        # Per Inception's requirements, it is best to split the SQL before executing it
        sql_split = "/*--user=%s; --password=%s; --host=%s; --enable-execute;--port=%d; --enable-ignore-warnings;--enable-split;*/\
             inception_magic_start;\
             use %s;\
             %s\
             inception_magic_commit;" % (
            self.user,
            self.password,
            self.host,
            self.port,
            workflow_detail.db_name, workflow_detail.sql_content)
        split_result = inception_engine.query(sql=sql_split)

        execute_result.rows = []
        # Hand each split statement back to Inception for execution. A long-lived connection is not needed here; short connections are fine.
        for splitRow in split_result.rows:
            sql_tmp = splitRow[1]
            sql_execute = "/*--user=%s;--password=%s;--host=%s;--enable-execute;--port=%d; --enable-ignore-warnings;%s*/\
                    inception_magic_start;\
                    %s\
                    inception_magic_commit;" % (
                self.user,
                self.password,
                self.host,
                self.port,
                str_backup,
                sql_tmp)

            one_line_execute_result = inception_engine.query(sql=sql_execute)
            # Execute and convert the result into a ReviewSet
            for sqlRow in one_line_execute_result.to_dict():
                execute_result.rows.append(ReviewResult(
                    id=sqlRow['ID'],
                    stage=sqlRow['stage'],
                    errlevel=sqlRow['errlevel'],
                    stagestatus=sqlRow['stagestatus'],
                    errormessage=sqlRow['errormessage'],
                    sql=sqlRow['SQL'],
                    affected_rows=sqlRow['Affected_rows'],
                    actual_affected_rows=sqlRow['Affected_rows'],
                    sequence=sqlRow['sequence'],
                    backup_dbname=sqlRow['backup_dbname'],
                    execute_time=sqlRow['execute_time'],
                    sqlsha1=sqlRow['sqlsha1']))

            # After each execution, write the result back to the workflow's execute_result
            workflow_detail.execute_result = execute_result.json()
            from django.db import connection
            if connection.connection is not None:
                connection.close()
            workflow_detail.save()

        # Post-process the result so its format matches the return value of sqlautoReview(), which makes it easier to render on the detail page.
        execute_result.status = "已正常结束"
        for sqlRow in execute_result.rows:
            # If any row has errlevel 1 or 2 and its stagestatus does not contain "Execute Successfully", treat the overall execution as abnormal.
            if (sqlRow.errlevel == 1 or sqlRow.errlevel == 2) and re.match(r"\w*Execute Successfully\w*",
                                                                           sqlRow.stagestatus) is None:
                execute_result.status = "执行有异常"
                execute_result.error = "Line {0} has error/warning: {1}".format(sqlRow.id, sqlRow.errormessage)

        return execute_result
Example #39
def stationSearch(request):
    type = request.POST['type']
    keyword = request.POST['keyword']
    stations = []  # default so the loop below is safe if both queries fail
    # print("Check Post -", type, keyword)

    print("Check Post -", type, keyword)

    if type == 'statnm':
        try:
            cursor = connection.cursor()
            strSql = "select evst.statNm,evst.addr,evst.lat,evst.lng,evst.useTime,evst.busicall,descInfo,congestion " \
                     "from ev_station evst \
                      join ev_real_time evtm on(evst.evsn=evtm.evsn),(select c.evSn, group_concat(des SEPARATOR '\n') as descInfo " \
                     "from (select a.evSn, a.chgerId, concat('기기 번호 :', a.chgerId , ' ( 상태 : ' , (select codeName from ev_code_inf where codeId = a.stat) , ', 충전타입 : ' , GROUP_CONCAT((select codeName from ev_code_inf where codeId = b.chgerType) SEPARATOR ','),')') as des " \
                     "from ev.ev_station_status a, ev.ev_station_chgertype b " \
                     "where	a.evSn = b.evSn group by a.evSn, a.chgerId) c group by c.evSn) info " \
                     "where evst.evSn = info.evSn and evst.statNm Like '%" + keyword + "%';"

            result = cursor.execute(strSql)
            stations = cursor.fetchall()
            print('stations - ', stations)

            connection.commit()
            connection.close()

        except:
            connection.rollback()
            print('Failed selecting in stations')


    elif type == 'addr':
        try:
            cursor = connection.cursor()
            strSql = "select evst.statNm,evst.addr,evst.lat,evst.lng,evst.useTime,evst.busicall, info.descInfo " \
                     "from ev.ev_station evst,(select c.evSn, group_concat(des SEPARATOR '\n') as descInfo " \
                     "from (select a.evSn, a.chgerId, concat('기기 번호 :', a.chgerId , ' ( 상태 : ' , (select codeName from ev_code_inf where codeId = a.stat) , ', 충전타입 : ' , GROUP_CONCAT((select codeName from ev_code_inf where codeId = b.chgerType) SEPARATOR ','),')') as des " \
                     "from ev.ev_station_status a, ev.ev_station_chgertype b " \
                     "where	a.evSn = b.evSn group by a.evSn, a.chgerId) c group by c.evSn) info " \
                     "where evst.evSn = info.evSn and evst.addr Like '%" + keyword + "%';"

            result = cursor.execute(strSql)
            stations = cursor.fetchall()
            print('stations - ', stations)

            connection.commit()
            connection.close()
        except Exception:
            connection.rollback()
            stations = []
            print('Failed selecting in stations')
    list = []
    cnt = 0
    for station in stations:
        row = {'statNm': station[0],
               'addr': station[1],
               'lat': station[2],
               'lng': station[3],
               'useTime': station[4],
               'busiCall': station[5],
               'descInfo': station[6],
               'congestion': station[7],
               }
        list.append(row)
        cnt += 1
        if cnt == 100:
            break
    for a in list:
        print("check - ", a)
    print(len(list))

    return JsonResponse(list, safe=False)
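Both branches above splice keyword straight into the SQL text, which breaks on quotes in the input and is open to SQL injection, and they commit/rollback by hand. A hedged sketch of the same name lookup with parameter binding and a context-managed cursor (the column list is simplified; the join and group_concat parts of the original query are omitted):

from django.db import connection


def station_search_by_name(keyword):
    # The %s placeholder is bound by the database driver, so keyword is
    # escaped safely instead of being spliced into the SQL text.
    sql = ("select statNm, addr, lat, lng, useTime, busicall "
           "from ev_station where statNm like %s")
    with connection.cursor() as cursor:
        cursor.execute(sql, ['%' + keyword + '%'])
        return cursor.fetchall()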
Ejemplo n.º 40
0
    def post(self, request):
        res = {'status': False}
        option = request.GET.get('option', None)
        if option:
            if option == 'add':
                addr = request.POST.get('addr', None)
                named = request.POST.get('named', None)
                newer = request.POST.get('newer', None)
                nper = request.POST.get('nper', None)
                time_rule = request.POST.get('time_rule', None)
                first_status = request.POST.get('first_status', None)

                if danger.xss(named):
                    return JsonResponse({'status': False, 'msg': 'xss'})

                if (newer == 'true') or (newer == True):
                    contact = user_modles.Contact()
                    contact.email = addr
                    contact.first_named = named
                    contact.save()
                else:
                    contact = user_modles.Contact.objects.filter(
                        Q(email=addr) & Q(status=True))
                    contact = contact[0]

                visit_time = request.POST.get('visit_time', None)
                email_template = request.POST.get('email_template', None)

                ea = models.EmailApply()

                if (first_status == 'false') or (first_status == False):
                    ea.first_status = False
                ea.nper = nper
                ea.now_time_rule = time_rule
                ea.contact = user_modles.Contact.objects.get(id=contact.id)
                ea.visit_time = visit_time
                ea.email_template = models.EmailTemplate.objects.get(
                    id=int(email_template))

                ea.save()

                connection.close()
                worker = APSTask.TaskProcess([ea.id], common.WAY[1][0])
                worker.start()

                res['id'] = ea.id
                res['status'] = True

            if option == 'tasker':
                numed = settings.EVERY
                tasks = []
                index = 0
                ids = request.POST.get('ids', None)

                ids = ids.split(',')

                nper = request.POST.get('nper', None)
                time_rule = request.POST.get('time_rule', None)

                visit_time = request.POST.get('visit_time', None)
                email_template = request.POST.get('email_template', None)

                first_status = request.POST.get('first_status', None)
                if (first_status == 'false') or (first_status == False):
                    first_status = False
                else:
                    first_status = True

                for e in ids:
                    contact = user_modles.Contact.objects.filter(
                        Q(id=e) & Q(status=True))
                    if contact:
                        contact = contact[0]

                        ea = models.EmailApply()
                        ea.nper = nper
                        ea.now_time_rule = time_rule
                        ea.contact = contact
                        ea.visit_time = visit_time
                        ea.email_template = models.EmailTemplate.objects.get(
                            id=int(email_template))
                        ea.first_status = first_status
                        ea.save()

                        tasks.append(str(ea.id))
                        if len(tasks) % 5 == 0:
                            time.sleep(0.5)

                index = len(tasks)
                tasks = [
                    tasks[i:i + numed] for i in range(0, len(tasks), numed)
                ]

                for ts in tasks:
                    ts = '_'.join(ts)
                    running = web_models.Running()
                    running.way = common.WAY[1][0]
                    running.ids = ts
                    running.done_status = False
                    running.block_status = False
                    running.save()
                res['status'] = True
                res['numed'] = index

            if option == 'update':
                pk = request.POST.get('id', None)
                if pk is None:
                    return JsonResponse(res)
                send_status = request.POST.get('send_status', None)

                visit_time = request.POST.get('visit_time', None)
                if int(send_status) == 1:
                    send_status = False
                else:
                    send_status = True
                ea = models.EmailApply.objects.get(id=pk)
                ea.visit_time = visit_time
                ea.send_status = send_status
                ea.save()
                res['status'] = True
                res['instance'] = {
                    'visit_time': visit_time,
                    'send_status': send_status
                }

            if option == 'trash':
                pk = request.GET.get('id', None)
                if pk is None:
                    pk = request.POST.get('id', None)

                ea = models.EmailApply.objects.get(id=pk)
                ea.status = False
                ea.save()
                res['status'] = True
                res['instance'] = {
                    'named': ea.contact.first_named,
                    'addr': ea.contact.email,
                    'service': ea.email_template.service
                }

        return JsonResponse(res)
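In the 'add' branch above, connection.close() runs right before the APSTask.TaskProcess worker is started, so the background worker cannot keep reusing the request thread's connection. A hedged sketch of the same hand-off with a plain threading.Thread (the task body is a placeholder, not the original worker):

import threading

from django.db import connection


def send_apply_emails(apply_ids):
    # Placeholder task body; the thread opens its own connection on first query.
    pass


def start_email_worker(apply_ids):
    # Close the current thread's connection first so it is not shared with
    # the worker; Django gives each thread a fresh connection lazily.
    connection.close()
    worker = threading.Thread(target=send_apply_emails, args=(apply_ids,))
    worker.start()
    return worker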
Ejemplo n.º 41
0
def ExecMakeTarget(storemedia, targetvguuid, targetHost, clientiqn,
                   serviceName, storageSize, aagroup, clumpgroup, subnet,
                   owner):
    chosenVG = VG.objects.get(vguuid=targetvguuid)
    clientiqnHash = hashlib.sha1(clientiqn).hexdigest()[:8]
    iqnTarget = "".join(
        ["iqn.2014.01.", targetHost, ":", serviceName, ":", clientiqnHash])
    try:
        targets = Target.objects.filter(
            iqntar__contains="".join([serviceName, ":", clientiqnHash]))
        if len(targets) == 0:
            raise ObjectDoesNotExist
        for t in targets:
            iqnComponents = t.iqntar.split(':')
            if ((serviceName == iqnComponents[1])
                    and (clientiqnHash == iqnComponents[2])):
                logger.info(
                    'Target already exists for (serviceName=%s,clientiqn=%s) tuple'
                    % (serviceName, clientiqn))
                try:
                    existingTargetstoremedia = LV.objects.get(
                        target=t).vg.storemedia
                except:
                    logger.error(
                        "Target %s exists in DB but LV does not, inconsistent"
                        % (t.iqntar))
                    return (
                        -1,
                        "Target %s exists in DB but LV does not, inconsistent"
                        % (t.iqntar))

                if (existingTargetstoremedia == storemedia):
                    return (1, t.iqntar)
                else:
                    errorStr = "Target %s on DIFFERENT storemedia %s already exists." % (
                        t.iqntar, existingTargetstoremedia)
                    logger.info(errorStr)
                    return (-1, errorStr)
            else:
                raise ObjectDoesNotExist
    except ObjectDoesNotExist:
        #    try:
        #        if subnet != 'public':
        #            IPRange.objects.get(iprange=subnet)
        #    except:
        #        logger.debug('Subnet %s not found on host %s while trying to create target %s, creation aborted, contact admin' %(subnet, targetHost, iqnTarget ))
        #        return (-1,"Invalid subnet specified")

        (quotaFlag, quotaReason) = CheckUserQuotas(float(storageSize), owner)
        if quotaFlag == -1:
            logger.debug(quotaReason)
            return (-1, quotaReason)
        else:
            logger.info(quotaReason)
        logger.info(
            "Creating new target for request {%s %s %s}, this is the generated iSCSItarget: %s"
            % (clientiqn, serviceName, str(storageSize), iqnTarget))
        targethost = StorageHost.objects.get(dnsname=targetHost)
        p = PollServer(targetHost)
        storeip1 = targethost.storageip1
        storeip2 = targethost.storageip2
        if subnet != 'public':
            try:
                storeip1 = Interface.objects.get(
                    owner=owner,
                    storagehost=targethost,
                    iprange__iprange=unicode(subnet)).ip
                storeip2 = storeip1
            except:
                logger.error(
                    'Chosen host %s is missing IP addresses in requested subnet'
                    % (targethost, ))
                return (
                    -1,
                    'Error in host network configuration or ownership for the required subnet, contact storage admin'
                )

        if p.CreateTarget(iqnTarget, clientiqn, str(storageSize), storeip1,
                          storeip2, targetvguuid) == 1:
            logger.info("SUCCESSFUL TARGET RUN")
            BASE_DIR = os.path.dirname(os.path.dirname(__file__))
            config = ConfigParser.RawConfigParser()
            config.read(os.path.join(BASE_DIR, 'saturn.ini'))
            (devDic, tarDic) = ParseSCSTConf(
                os.path.join(BASE_DIR,
                             config.get('saturnring', 'iscsiconfigdir'),
                             targetHost + '.scst.conf'))
            logger.info("DevDic = " + str(devDic))
            logger.info("TarDic = " + str(tarDic))
            if iqnTarget in tarDic:
                newTarget = Target(owner=owner,
                                   targethost=targethost,
                                   iqnini=clientiqn,
                                   iqntar=iqnTarget,
                                   sizeinGB=float(storageSize),
                                   storageip1=storeip1,
                                   storageip2=storeip2)
                newTarget.save()
                lvDict = p.GetLVs(targetvguuid)
                lvName = 'lvol-' + hashlib.md5(iqnTarget +
                                               '\n').hexdigest()[0:8]
                logger.info("Looking for %s in lvDict %s" %
                            (lvName, str(lvDict)))
                if lvName in lvDict:
                    newLV = LV(
                        target=newTarget,
                        vg=chosenVG,
                        lvname=lvName,
                        lvsize=storageSize,
                        #lvthinmapped=lvDict[lvName]['Mapped size'],
                        lvuuid=lvDict[lvName]['LV UUID'])
                    newLV.save()
                    chosenVG.CurrentAllocGB = max(
                        0, chosenVG.CurrentAllocGB) + float(storageSize)
                    chosenVG.maxavlGB = max(
                        0, chosenVG.maxavlGB - float(storageSize))
                    chosenVG.save()
            else:
                logger.error(
                    'Error - could not use ParseSCSTConf while working with target creation of %s, check if git and %s are in sync'
                    % (iqnTarget, targethost + '.scst.conf'))
                return (-1, "CreateTarget returned error 2, contact admin")

            tar = Target.objects.get(iqntar=iqnTarget)
            aa = AAGroup(name=aagroup, target=tar)
            aa.save()
            aa.hosts.add(targethost)
            aa.save()
            newTarget.aagroup = aa
            cg = ClumpGroup(name=clumpgroup, target=tar)
            cg.save()
            cg.hosts.add(targethost)
            cg.save()
            newTarget.clumpgroup = cg
            newTarget.save()
            connection.close(
            )  #close DB connection to prevent RQ connection reset error in PG database logs
            return (0, iqnTarget)
        else:
            connection.close(
            )  #close DB connection to prevent RQ connection reset error in PG database logs
            logger.error('CreateTarget did not work')
            return (-1, "CreateTarget returned error 1, contact admin")
Ejemplo n.º 42
0
def directionSearch(request):
    start = request.POST['start']
    goal = request.POST['goal']

    print("Check Post -", start, goal)
    startGeo = getGeocode(start)
    goalGeo = getGeocode(goal)
    if startGeo == [] or goalGeo == [] :
        list = "NoData"
        return JsonResponse(list, safe=False)

    startLocation = startGeo[2] + "," + startGeo[3]
    goalLocation = goalGeo[2] + "," + goalGeo[3]
    print(startLocation)
    print(goalLocation)

    directionDataList = getDirectionApi(startLocation, goalLocation)
    print(directionDataList)

    cursor = connection.cursor()
    list = []
    for directionData in directionDataList:
        latitude = str(directionData[1])
        longtitude = str(directionData[0])
        # print(latitude)
        # print(longtitude)

        try:

            strSql = "select evst.statNm,evst.addr,evst.lat,evst.lng,evst.useTime,evst.busicall,descInfo,congestion,(6371*acos(cos(radians(" + latitude + "))*cos(radians(evst.lat))*cos(radians(evst.lng)-radians(" + longtitude + "))+sin(radians(" + latitude + "))*sin(radians(evst.lat))))AS distance, info.descInfo from ev.ev_station evst join ev_real_time evtm on(evst.evsn=evtm.evsn),(select c.evSn, group_concat(des SEPARATOR '\n') as descInfo from (select	a.evSn, a.chgerId, concat('기기 번호 :', a.chgerId , ' ( 상태 : ' , (select codeName from ev.ev_code_inf where codeId = a.stat) , ', 충전타입 : ', GROUP_CONCAT((select codeName from ev.ev_code_inf where codeId = b.chgerType) SEPARATOR ','),')') as des from ev.ev_station_status a, ev.ev_station_chgertype b where a.evSn = b.evSn group by a.evSn, a.chgerId) c group by c.evSn) info where evst.evSn = info.evSn HAVING distance <= 1 ORDER BY distance;"

            result = cursor.execute(strSql)
            stations = cursor.fetchall()
            print('stations - ', stations)

            cnt = 0
            for station in stations:
                row = {'statnm': station[0],
                       'addr': station[1],
                       'lat': station[2],
                       'lng': station[3],
                       'useTime': station[4],
                       'busicall': station[5],
                       'info': station[6],
                       'congestion': station[7],
                       }
                list.append(row)
                cnt += 1
                if cnt == 3:
                    break

        except:
            connection.rollback()
            print('Failed selecting in stations')
    connection.commit()
    connection.close()

    # for a in list:
    #     print("check - ", a)
    # print(len(list))

    return JsonResponse(list, safe=False)
Ejemplo n.º 43
0
 def run_async_rich_menu_check(self):
     if not self.current_rich_menu:
         self.set_rich_menu(force_refresh=False)
     from django.db import connection
     connection.close()
Ejemplo n.º 44
0
def drop_db():
    # close django's connection to the database
    connection.close()
    return subprocess.check_call(DROP_DB_CMD, shell=True)
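drop_db() closes Django's default connection so the external DROP DATABASE command is not blocked by this process's open session. When several database aliases are configured, each one can be closed the same way; a sketch reusing the DROP_DB_CMD assumed by the snippet above:

import subprocess

from django.db import connections


def drop_db_all_aliases():
    # Close every configured alias, not just 'default', before dropping.
    for conn in connections.all():
        conn.close()
    return subprocess.check_call(DROP_DB_CMD, shell=True)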
Ejemplo n.º 45
0
 def update_version_tuple(self):
     if get_database_version is None:
         if not hasattr(self, "_model_history"):
             # list of (model, database) versions
             self._model_history = [("0", "0")]
             self.model_version_mismatch = False
             # do not log, always true on client
             # self.log("cannot get model version, file missing ... ?", logging_tools.LOG_LEVEL_WARN)
     else:
         _database_v = get_database_version()
         _model_v = get_models_version()
         if not hasattr(self, "_model_history"):
             self._model_history = [(_model_v, _database_v)]
             self.model_version_mismatch = False
             self.log(
                 "Starting Model version is {}, database version is {}".
                 format(
                     self._model_history[0][0],
                     self._model_history[0][1],
                 ))
         else:
             # this is only a rough start, we have to add grace periods and restart options, TODO, FIXME
             if _model_v != self._model_history[-1][0]:
                 _cs = "Model version changed from {} to {} (database from version {} version is {}, history has now {:d} entries)".format(
                     self._model_history[-1][0],
                     _model_v,
                     self._model_history[-1][1],
                     _database_v,
                     len(self._model_history) + 1,
                 )
                 self._model_history.append((_model_v, _database_v))
                 self.model_version_mismatch = True
                 if not is_debug_run():
                     self.log("{}".format(_cs),
                              logging_tools.LOG_LEVEL_ERROR)
                 else:
                     self.log(
                         "{} (ignoring due to debug run)".format(_cs, ),
                         logging_tools.LOG_LEVEL_WARN)
             if self.model_version_mismatch and ICSWVersion is not None:
                 try:
                     # close database connection to disable query cache
                     connection.close()
                 except:
                     pass
                 _highest = self._model_history[-1]
                 try:
                     _h_idx = ICSWVersion.objects.all().order_by(
                         "-insert_idx").values_list("insert_idx",
                                                    flat=True)[0]
                 except:
                     self.log(
                         "unable to determine highest ICSWVersion.insert_idx: {}"
                         .format(process_tools.get_except_info()),
                         logging_tools.LOG_LEVEL_ERROR)
                 else:
                     _dict = {
                         _entry.name: _entry.version
                         for _entry in ICSWVersion.objects.filter(
                             Q(insert_idx=_h_idx))
                     }
                     _db_model_v, _db_database_v = (_dict["models"],
                                                    _dict["database"])
                     _v_info = "models={}, database={}".format(
                         _db_model_v,
                         _db_database_v,
                     )
                     if _db_model_v != _highest[
                             0] and _db_database_v != _highest[1]:
                         self.log(
                             "Version info from database ({}) does not match discovered info"
                             .format(_v_info), logging_tools.LOG_LEVEL_WARN)
                     else:
                         self.log(
                             "Version info from database ({}) matches discovered info, clearing mismatch flag"
                             .format(_v_info))
                         self.model_version_mismatch = False
Ejemplo n.º 46
0
Archivo: query.py Proyecto: ycg/archer
def query(request):
    cluster_name = request.POST.get('cluster_name')
    sqlContent = request.POST.get('sql_content')
    dbName = request.POST.get('db_name')
    limit_num = request.POST.get('limit_num')

    finalResult = {'status': 0, 'msg': 'ok', 'data': {}}

    # Server-side parameter validation
    if sqlContent is None or dbName is None or cluster_name is None or limit_num is None:
        finalResult['status'] = 1
        finalResult['msg'] = '页面提交参数可能为空'
        return HttpResponse(json.dumps(finalResult), content_type='application/json')

    sqlContent = sqlContent.strip()
    if sqlContent[-1] != ";":
        finalResult['status'] = 1
        finalResult['msg'] = 'SQL语句结尾没有以;结尾,请重新修改并提交!'
        return HttpResponse(json.dumps(finalResult), content_type='application/json')

    # Look up the requesting user
    loginUser = request.session.get('login_username', False)
    loginUserOb = users.objects.get(username=loginUser)

    # Filter out comment lines and non-query statements
    sqlContent = ''.join(
        map(lambda x: re.compile(r'(^--\s+.*|^/\*.*\*/;\s*$)').sub('', x, count=1),
            sqlContent.splitlines(1))).strip()
    # Remove blank lines
    sqlContent = re.sub('[\r\n\f]{2,}', '\n', sqlContent)

    sql_list = sqlContent.strip().split('\n')
    for sql in sql_list:
        if re.match(r"^select|^show|^explain", sql.lower()):
            break
        else:
            finalResult['status'] = 1
            finalResult['msg'] = '仅支持^select|^show|^explain语法,请联系管理员!'
            return HttpResponse(json.dumps(finalResult), content_type='application/json')

    # Fetch the cluster's connection info (read-only account) and keep only the first statement before the semicolon
    slave_info = slave_config.objects.get(cluster_name=cluster_name)
    sqlContent = sqlContent.strip().split(';')[0]

    # Check query privileges and resolve limit_num
    priv_check_info = query_priv_check(loginUserOb, cluster_name, dbName, sqlContent, limit_num)

    if priv_check_info['status'] == 0:
        limit_num = priv_check_info['data']
    else:
        return HttpResponse(json.dumps(priv_check_info), content_type='application/json')

    if re.match(r"^explain", sqlContent.lower()):
        limit_num = 0

    # Append a LIMIT clause to SELECT statements that lack one
    if re.match(r"^select", sqlContent.lower()):
        if re.search(r"limit\s+(\d+)$", sqlContent.lower()) is None:
            if re.search(r"limit\s+\d+\s*,\s*(\d+)$", sqlContent.lower()) is None:
                sqlContent = sqlContent + ' limit ' + str(limit_num)

    sqlContent = sqlContent + ';'

    # Run the query and time its execution
    t_start = time.time()
    sql_result = dao.mysql_query(slave_info.slave_host, slave_info.slave_port, slave_info.slave_user,
                                 prpCryptor.decrypt(slave_info.slave_password), str(dbName), sqlContent, limit_num)
    t_end = time.time()
    cost_time = "%5s" % "{:.4f}".format(t_end - t_start)

    sql_result['cost_time'] = cost_time

    # Data masking: honour the settings for whether masking is enabled and whether parse errors may be ignored
    t_start = time.time()
    if settings.DATA_MASKING_ON_OFF:
        # Only SELECT statements are masked
        if re.match(r"^select", sqlContent.lower()):
            try:
                masking_result = datamasking.data_masking(cluster_name, dbName, sqlContent, sql_result)
            except Exception:
                if settings.CHECK_QUERY_ON_OFF:
                    finalResult['status'] = 1
                    finalResult['msg'] = '脱敏数据报错,请联系管理员'
                    return HttpResponse(json.dumps(finalResult), content_type='application/json')
            else:
                if masking_result['status'] != 0:
                    if settings.CHECK_QUERY_ON_OFF:
                        return HttpResponse(json.dumps(masking_result), content_type='application/json')

    t_end = time.time()
    masking_cost_time = "%5s" % "{:.4f}".format(t_end - t_start)

    sql_result['masking_cost_time'] = masking_cost_time

    finalResult['data'] = sql_result

    # Record successful queries in the database
    if sql_result.get('Error'):
        pass
    else:
        query_log = QueryLog()
        query_log.username = loginUser
        query_log.db_name = dbName
        query_log.cluster_name = cluster_name
        query_log.sqllog = sqlContent
        if int(limit_num) == 0:
            limit_num = int(sql_result['effect_row'])
        else:
            limit_num = min(int(limit_num), int(sql_result['effect_row']))
        query_log.effect_row = limit_num
        query_log.cost_time = cost_time
        # Guard against the connection having timed out during a long query
        try:
            query_log.save()
        except:
            connection.close()
            query_log.save()

    # Return the query result
    return HttpResponse(json.dumps(finalResult, cls=ExtendJSONEncoder, bigint_as_string=True),
                        content_type='application/json')
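The query_log.save() above is retried once after connection.close() because a slow query can outlive MySQL's wait_timeout and leave the ORM holding a dead connection; closing it lets Django reconnect transparently on the retry. A hedged sketch of the same idea as a small helper (the helper name and the narrower exception are not from the original code, which uses a bare except):

from django.db import OperationalError, connection


def save_with_reconnect(instance):
    # The first attempt may hit a timed-out connection; close it and retry once.
    try:
        instance.save()
    except OperationalError:
        connection.close()
        instance.save()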
Ejemplo n.º 47
0
def run_cron(jobs=None,
             update_heartbeat=True,
             force_run=False,
             dryrun=False,
             clear_pid=False):
    try:

        # TODO: auto-kill inactive long-running cron processes whose
        # threads have stalled and not exited properly?
        # Check for 0 cpu usage.
        #ps -p <pid> -o %cpu

        stdout_map = defaultdict(list)  # {prod_id:[]}
        stderr_map = defaultdict(list)  # {prod_id:[]}
        stdout_queue = Queue()
        stderr_queue = Queue()

        if settings.CHRONIKER_AUTO_END_STALE_JOBS and not dryrun:
            Job.objects.end_all_stale()

        # Check PID file to prevent conflicts with prior executions.
        # TODO: is this still necessary? deprecate? As long as jobs run by
        # JobProcess don't wait for other jobs, multiple instances of cron
        # should be able to run simultaneously without issue.
        if settings.CHRONIKER_USE_PID:
            pid_fn = settings.CHRONIKER_PID_FN
            pid = str(os.getpid())
            any_running = Job.objects.all_running().count()
            if not any_running:
                # If no jobs are running, then even if the PID file exists,
                # it must be stale, so ignore it.
                pass
            elif os.path.isfile(pid_fn):
                try:
                    old_pid = int(open(pid_fn, 'r').read())
                    if utils.pid_exists(old_pid):
                        print('%s already exists, exiting' % pid_fn)
                        sys.exit()
                    else:
                        print(('%s already exists, but contains stale '
                               'PID, continuing') % pid_fn)
                except ValueError:
                    pass
                except TypeError:
                    pass
            open(pid_fn, 'w').write(pid)
            clear_pid = True

        procs = []
        if force_run:
            q = Job.objects.all()
            if jobs:
                q = q.filter(id__in=jobs)
        else:
            q = Job.objects.due_with_met_dependencies_ordered(jobs=jobs)

        running_ids = set()
        for job in q:

            # This is necessary, otherwise we get the exception
            # DatabaseError: SSL error: sslv3 alert bad record mac
            # even through we're not using SSL...
            # We work around this by forcing Django to use separate
            # connections for each process by explicitly closing the
            # current connection.
            connection.close()

            # Re-check dependencies to incorporate any previous iterations
            # that marked jobs as running, potentially causing dependencies
            # to become unmet.
            Job.objects.update()
            job = Job.objects.get(id=job.id)
            if not force_run and not job.is_due_with_dependencies_met(
                    running_ids=running_ids):
                print('Job %i %s is due but has unmet dependencies.' %
                      (job.id, job))
                continue

            # Immediately mark the job as running so the next jobs can
            # update their dependency check.
            print('Running job %i %s.' % (job.id, job))
            running_ids.add(job.id)
            if dryrun:
                continue
            job.is_running = True
            Job.objects.filter(id=job.id).update(is_running=job.is_running)

            # Launch job.
            #proc = JobProcess(job, update_heartbeat=update_heartbeat, name=str(job))
            job_func = partial(
                run_job,
                job=job,
                force_run=force_run or job.force_run,
                update_heartbeat=update_heartbeat,
                name=str(job),
            )
            proc = JobProcess(job=job,
                              max_seconds=job.timeout_seconds,
                              target=job_func,
                              name=str(job),
                              kwargs=dict(
                                  stdout_queue=stdout_queue,
                                  stderr_queue=stderr_queue,
                              ))
            proc.start()
            procs.append(proc)

        if not dryrun:
            print("%d Jobs are due." % len(procs))

            # Wait for all job processes to complete.
            while procs:

                while not stdout_queue.empty():
                    proc_id, proc_stdout = stdout_queue.get()
                    stdout_map[proc_id].append(proc_stdout)

                while not stderr_queue.empty():
                    proc_id, proc_stderr = stderr_queue.get()
                    stderr_map[proc_id].append(proc_stderr)

                for proc in list(procs):

                    # Auto kill processes that haven't terminated but yet
                    # register no cpu usage.
                    #cpu = proc.get_cpu_usage_recursive()
                    #print('cpu:',proc,cpu)
                    #                    if not cpu:
                    #                        utils.kill_process(proc.pid)
                    #                        time.sleep(1)

                    if not proc.is_alive():
                        print('Process %s ended.' % (proc, ))
                        procs.remove(proc)
                    elif proc.is_expired:
                        print('Process %s expired.' % (proc, ))
                        proc_id = proc.pid
                        proc.terminate()
                        run_end_datetime = timezone.now()
                        procs.remove(proc)

                        connection.close()
                        Job.objects.update()
                        run_start_datetime = Job.objects.get(
                            id=proc.job.id).last_run_start_timestamp
                        proc.job.is_running = False
                        proc.job.force_run = False
                        proc.job.force_stop = False
                        proc.job.save()

                        # Create log record since the job was killed before it had
                        # a chance to do so.
                        Log.objects.create(
                            job=proc.job,
                            run_start_datetime=run_start_datetime,
                            run_end_datetime=run_end_datetime,
                            success=False,
                            on_time=False,
                            hostname=socket.gethostname(),
                            stdout=''.join(stdout_map[proc_id]),
                            stderr=''.join(stderr_map[proc_id] +
                                           ['Job exceeded timeout\n']),
                        )

                time.sleep(1)
            print('!' * 80)
            print('All jobs complete!')
    finally:
        if settings.CHRONIKER_USE_PID and os.path.isfile(pid_fn) \
        and clear_pid:
            os.unlink(pid_fn)
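run_cron() closes the connection before each job is launched so that every child process opens its own database connection instead of sharing the parent's socket, which is what produces errors such as "SSL error: sslv3 alert bad record mac". A minimal sketch of that precaution around a multiprocessing.Process (the job function is a placeholder):

import multiprocessing

from django.db import connection


def launch_job(job_func, *args):
    # Close the parent's connection first; parent and child then open
    # separate connections lazily instead of corrupting a shared one.
    connection.close()
    proc = multiprocessing.Process(target=job_func, args=args)
    proc.start()
    return proc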
Ejemplo n.º 48
0
 def _decorator(*args, **kwargs):
     try:
         return function(*args, **kwargs)
     finally:
         connection.close()
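The fragment above is only the inner function of a decorator that always closes the database connection after the wrapped callable finishes, which is useful for functions handed to background workers or thread pools. A complete, hedged version (the outer name close_db_connection is an assumption, not taken from the original source):

import functools

from django.db import connection


def close_db_connection(function):
    # Run the wrapped function, then close this thread's connection,
    # even when the function raised.
    @functools.wraps(function)
    def _decorator(*args, **kwargs):
        try:
            return function(*args, **kwargs)
        finally:
            connection.close()
    return _decorator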
Ejemplo n.º 49
0
    def execute(self, workflow=None):
        """执行上线单"""
        instance = workflow.instance
        execute_result = ReviewSet(
            full_sql=workflow.sqlworkflowcontent.sql_content)
        inception_engine = InceptionEngine()
        if workflow.is_backup:
            str_backup = "--enable-remote-backup"
        else:
            str_backup = "--disable-remote-backup"
        # Per Inception's requirements, split the SQL before executing it
        sql_split = f"""/*--user={instance.user};--password={instance.raw_password};--host={instance.host}; 
                         --port={instance.port};--enable-ignore-warnings;--enable-split;*/
                         inception_magic_start;
                         use {workflow.db_name};
                         {workflow.sqlworkflowcontent.sql_content}
                         inception_magic_commit;"""
        split_result = inception_engine.query(sql=sql_split)

        execute_result.rows = []
        # Feed each split statement back to Inception for execution, reusing one long-lived connection.
        for splitRow in split_result.rows:
            sql_tmp = splitRow[1]
            sql_execute = f"""/*--user={instance.user};--password={instance.raw_password};--host={instance.host};
                                --port={instance.port};--enable-execute;--enable-ignore-warnings;{str_backup};*/\
                                inception_magic_start;\
                                {sql_tmp}\
                                inception_magic_commit;"""
            one_line_execute_result = inception_engine.query(sql=sql_execute,
                                                             close_conn=False)
            # Execute and convert the result into a ReviewSet
            for sqlRow in one_line_execute_result.to_dict():
                execute_result.rows.append(
                    ReviewResult(id=sqlRow['ID'],
                                 stage=sqlRow['stage'],
                                 errlevel=sqlRow['errlevel'],
                                 stagestatus=sqlRow['stagestatus'],
                                 errormessage=sqlRow['errormessage'],
                                 sql=sqlRow['SQL'],
                                 affected_rows=sqlRow['Affected_rows'],
                                 actual_affected_rows=sqlRow['Affected_rows'],
                                 sequence=sqlRow['sequence'],
                                 backup_dbname=sqlRow['backup_dbname'],
                                 execute_time=sqlRow['execute_time'],
                                 sqlsha1=sqlRow['sqlsha1']))

            # After each statement, write the results so far to the workflow's execute_result so progress can be shown and preserved
            workflow.sqlworkflowcontent.execute_result = execute_result.json()
            try:
                workflow.sqlworkflowcontent.save()
                workflow.save()
            # Guard against the connection having timed out during execution
            except OperationalError:
                connection.close()
                workflow.sqlworkflowcontent.save()
                workflow.save()

        # If any row has errlevel 1 or 2 and its stagestatus does not contain "Execute Successfully", the final result is an exception.
        execute_result.status = "workflow_finish"
        for sqlRow in execute_result.rows:
            if sqlRow.errlevel in (1, 2) and not re.search(
                    r"Execute Successfully", sqlRow.stagestatus):
                execute_result.status = "workflow_exception"
                execute_result.error = "Line {0} has error/warning: {1}".format(
                    sqlRow.id, sqlRow.errormessage)
                break
        return execute_result
Ejemplo n.º 50
0
def generate_tags(article_id):
    connection.close()
    try:
        import pandas as pd
        import numpy as np
        import itertools

        from sklearn.feature_extraction.text import TfidfTransformer, CountVectorizer
        from sklearn.preprocessing import MultiLabelBinarizer
        from sklearn.multiclass import OneVsRestClassifier
        from sklearn.svm import SVC

        from website.models import Article
        a = Article.objects.get(id=article_id)

        # import article's comments into dataframe
        df = pd.DataFrame(
            list(a.comment_set.all().values('id', 'article', 'disqus_id',
                                            'text', 'summary', 'tags',
                                            'suggested_tags')))
        # merge all text (comments+summaries) into a new column
        df['train_text'] = df[['text', 'summary']].apply(lambda x: ' '.join(x),
                                                         axis=1)

        # define  classifier
        clf = OneVsRestClassifier(SVC(kernel='linear'))

        # train data: use only comments with tags
        tagged = df.loc[df['tags'].notnull()]

        # train data: preproccess and vectorize (TfIdf) text data
        count_vect = CountVectorizer(
            stop_words='english',
            min_df=3,
            max_df=0.30,
            #lowercase=True,
            ngram_range=(1, 2),
        )
        X_train_counts = count_vect.fit_transform(list(tagged.train_text))
        tfidf_transformer = TfidfTransformer().fit(X_train_counts)
        X_train_tfidf = tfidf_transformer.transform(X_train_counts)
        # train classifier
        clf = clf.fit(X_train_tfidf, tagged.tags)

        # suggest tags for ALL instances in df
        test_df = df.drop_duplicates(subset=['disqus_id'])
        X_test_counts = count_vect.transform(list(test_df.train_text))
        X_test_tfidf = tfidf_transformer.transform(X_test_counts)
        suggested = clf.predict(X_test_tfidf)
        # save suggested tags to the dataframe
        test_df.suggested_tags = suggested

        # add suggested tags to the database
        sorted_df = test_df.sort_values('disqus_id')
        comments = a.comment_set.all().order_by('disqus_id')

        for comment in comments:
            comment.suggested_tags.clear()

        for row_item, comment in zip(sorted_df.iterrows(), comments):
            index, row = row_item
            if row['suggested_tags']:
                comment.suggested_tags.add(row['suggested_tags'])

    except Exception as e:
        print(e)
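generate_tags() calls connection.close() first because it runs as a background task, and a connection inherited from the parent process or left over from an earlier task may already be stale. Django also provides django.db.close_old_connections(), which discards only broken or expired (CONN_MAX_AGE) connections; a sketch of applying it at task boundaries, with run_task as an illustrative wrapper name:

from django.db import close_old_connections


def run_task(task_func, *args, **kwargs):
    # Drop unusable or expired connections before and after the task so
    # every task body starts and ends with a clean connection state.
    close_old_connections()
    try:
        return task_func(*args, **kwargs)
    finally:
        close_old_connections()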
Ejemplo n.º 51
0
    def ThreadMain(self, ProcName):
        logger.info('Starting ' + ProcName)
        # Start Process pool with 2 process
        self.ReqTags = 2
        self.ProcPool = multiprocessing.Pool(self.ReqTags)
        jobs = []
        while 1:
            if self.mDieFlag == 1: break  # Request for death
            self.mLock.acquire()
            self.ProcPoolProblemFlag = 0
            for self.worker in self.ProcPool._pool:
                if not self.worker.is_alive():
                    self.ProcPoolProblemFlag = 1
                    logger.error(
                        'Problem with process_name: %s, process_pid: %s, process_exitcode: %s',
                        self.worker.name, self.worker.pid,
                        self.worker.exitcode)
            self.Time, self.Run = ESSDB.DB().action('ESSProc', 'GET',
                                                    ('Time', 'Run'),
                                                    ('Name', ProcName))[0]
            if self.Run == '0' or self.ProcPoolProblemFlag == 1:
                logger.info('Stopping ' + ProcName)
                if self.ProcPoolProblemFlag:
                    self.ProcPool.terminate()
                else:
                    self.ProcPool.close()
                self.ProcPool.join()
                ESSDB.DB().action('ESSProc', 'UPD',
                                  ('Status', '0', 'Run', '0', 'PID', '0'),
                                  ('Name', ProcName))
                time.sleep(1)
                self.mLock.release()
                logger.info('RunFlag: 0')
                time.sleep(1)
                break
            # Process Item
            lock = thread.allocate_lock()

            AccessQueue_DbRows = AccessQueue.objects.filter(Status=0).all()
            for AccessQueue_DbRow in AccessQueue_DbRows:
                ##############################################################################################Y
                # if self.ProcPool._state == 0 then pool is working.
                if self.ProcPool._state == 0:
                    # Get active queue depth for self.ProcPool._cache.
                    self.ActiveProcQueue = len(self.ProcPool._cache)
                    ###########################################################################################
                    # If self.ActiveProcQueue < self.ReqTags start DIPRequest process
                    if self.ActiveProcQueue < self.ReqTags:
                        AccessQueue_DbRow.Status = 2
                        #model.meta.Session.commit()
                        AccessQueue_DbRow.save()
                        logger.info('Add ReqUUID: %s to GenerateDIPProc' %
                                    AccessQueue_DbRow.ReqUUID)
                        res = self.ProcPool.apply_async(
                            GenerateDIPProc, (AccessQueue_DbRow.ReqUUID, ))
                        jobs.append(res)
            for job in jobs:
                try:
                    msg = 'Result from GenerateDIPProc: %s' % repr(
                        job.get(timeout=1))
                except multiprocessing.TimeoutError as e:
                    msg = 'Timeout wait for result from GenerateDIPProc'
                logger.debug(msg)
            if len(self.ProcPool._cache) == 0:
                jobs = []
            logger.debug('ProcPool_cache: %r', self.ProcPool._cache)
            connection.close()
            time.sleep(5)
            self.mLock.release()
        time.sleep(10)
        self.RunFlag = 0
        self.mDieFlag = 0
Ejemplo n.º 52
0
    except Exception, ee:
        print '--ee:', ee
    else:
        q = queqe_server()
        cursor = connection.cursor()
        print '-----------------------Start Writedata %s' % index
        if AutoDelTmp:
            try:
                ret = auto_del()
            except Exception, e:
                print '--Auto Check&Delete failed!(%s):' % e

        parseLogDataInFile(connection, cursor)
        try:
            cursor.close()
            connection.close()
        except:
            pass

    q.connection.disconnect()
    print '-----------------------End Writedata %s' % index


class WriteDataThread(threading.Thread):
    def __init__(self, index):
        self.index = index
        super(WriteDataThread, self).__init__()

    def run(self):
        run_writedata(self.index)
Ejemplo n.º 53
0
def info_list(request):
	query = request.GET['query']
	champ_df = pd.read_csv('D:/KUGG/kugg/df/Champ.csv')
	champ_list=list(champ_df['name'].values)

	if query:
		# Champion search
		if query in champ_list:
			#cid=122
			for i,r in enumerate(champ_df['name']):
				if query == r:
					cid = champ_df['champion'][i]
			
			item_tree = get_item_tree(int(cid))
			#print(item_tree)
			context = {'selected_champ' : query, 'champ_list':champ_list, 'item_tree':item_tree}
			print("search champion")
			return render(request, 'kugg/selected_champion_list.html', context)
		# Summoner name search
		try:
			curs = conn.cursor()
			sql = "select * from usersinfo where sname=(%s)"
			curs.execute(sql,(query,))
			usersinfo = curs.fetchone()
			summonerId = usersinfo[0]
			accountId = usersinfo[1]
			sname = usersinfo[3]
			profileIconId = usersinfo[4]
			
			revisionDate_unix = usersinfo[5]/1000
			revisionDate = convert_datetime(revisionDate_unix)
			summonerLevel = usersinfo[6]
			print("userinfo complete")
		except:
			conn.rollback()
			print("Failed selecting in usersinfo")
			context = {"username": query}
			return render(request, 'kugg/info_list.html',context)
		try:
			sql = "select * from usersleague where summonerId=(%s)"
			curs.execute(sql,(summonerId,))
			usersleague = curs.fetchone()	
			tier = usersleague[0]
			#leagueId = usersleague[1]
			queue = usersleague[2]  # queue type
			lname = usersleague[3]  # league name
			leaguePoints = usersleague[6]
			srank = usersleague[7]  # numeric rank within the tier (e.g. the "1" in Master 1)
			wins = usersleague[8]
			lossers = usersleague[9]
			veteran = usersleague[10]
			win_rate = math.trunc(wins*100/(wins+lossers))

			info_df = pd.DataFrame(columns=['sname','profileIconId','revisionDate','summonerLevel','tier','queue','lname','leaguePoints','srank','wins','lossers','veteran','win_rate'])
			info_df.loc[0]=[sname,profileIconId,revisionDate,summonerLevel,tier,queue,lname,leaguePoints,srank,wins,lossers,veteran,win_rate]
			#print(info_df)
			# info_json_records = info_df.reset_index().to_json(orient ='records') 
			# info_data = [] 
			# info_data = json.loads(info_json_records) 
			info_data = df_to_json(info_df)
			#print(info_data)
			print("uesrleague complete")
		except:
			conn.rollback()
			info_df = pd.DataFrame(columns=['sname','profileIconId','revisionDate','summonerLevel'])
			info_df.loc[0]=[sname,profileIconId,revisionDate,summonerLevel]
			#print(info_df)
			# info_json_records = info_df.reset_index().to_json(orient ='records') 
			# info_data = [] 
			# info_data = json.loads(info_json_records) 
			info_data = df_to_json(info_df)
			print("Failed selecting in usersleague")
		try:
			sql = "select * from usersmatchlist_sample where accountId=(%s) limit 20" 
			curs.execute(sql,(accountId,))
			data = curs.fetchall()

			platformId=[]
			gameId=[]
			champion_tmp=[]
			queue=[]
			season=[]
			timestamp=[]
			role=[]
			lane=[]
			
			for t in data:
				platformId.append(t[0])
				gameId.append(t[1])
				champion_tmp.append(t[2])
				queue.append(t[3])
				season.append(t[4])
				timestamp.append(t[5])
				role.append(t[6])
				lane.append(t[7])	
			
			champion=[]
			for c in champion_tmp:
				for i,r in enumerate(champ_df['champion']):
					if c == r:
						champion.append(champ_df['name'][i])
			#print(champion)
			columns_matchlist = ['platformId','gameId','champion','queue','season','timestamp','role','lane']
			
			matchlists = []
			gameId_len = len(gameId)
			for idx in range(gameId_len):
				matchlist = [platformId[idx],gameId[idx],champion[idx],queue[idx],season[idx],timestamp[idx],role[idx],lane[idx]]
				matchlists.append(matchlist)
			print("matchlist complete")
			#print(matchlists)
			
		except:
			conn.rollback()
			print("Failed selecting in usersmatchlist")
			
		try:
			#gameId = gameId[-1::-1]
			matchlists = matchlists[-1::-1]
			gameId = tuple(gameId)

			# gameId = (4684958376, 4684716231, 4681708889)
			# gameId_len = 3
			# Build a single parameterized IN (...) clause instead of branching on the number
			# of game ids; this also keeps the gameMode filter correctly grouped with the id match.
			placeholders = ','.join(['%s'] * gameId_len)
			sql = "select * from matchinfo where gameMode='CLASSIC' and gameId in ({})".format(placeholders)
			curs.execute(sql,gameId)
			
			print("sql complete")
			data_list=[]
			match_num=0
			matchinfo = curs.fetchone()

			print("fetch complete")
			c = 0
			comp_idx = 0
			#print(matchlists[0])

			gameId= gameId[-1::-1]
			#print(gameId)
			matchlists_comp=[]
			while matchinfo:
				#print(matchinfo[8])
				for i,g in enumerate(gameId):
					if g == matchinfo[0]:
						comp_idx = i
				#print(gameId[comp_idx])

				if matchinfo[8] == "CLASSIC" :
					#print("if CLASSIC")
					matchlists_comp.append(matchlists[comp_idx])

					#print(matchinfo[0])
					#print(matchinfo[8])
					teams = matchinfo[10]
					participants = matchinfo[11]
					pId = matchinfo[12]
					print('teams,participants')

					# teams = teams.replace('\\','')
					# teams = teams[1:-1]				
					# teams = literal_eval(teams)
					teams = json_to_df(teams)
					
					# participants = participants.replace('\\','')
					# participants = participants[1:-1]
					# participants = literal_eval(participants)
					participants = json_to_df(participants)
					
					
					# pId = pId.replace('\\','')
					# pId = pId[1:-1]
					# pId = literal_eval(pId)
					pId = json_to_df(pId)
					
					parts = [ pId[i]['player']['accountId'] for i in range(10)]
					#print(all_part)
					for i,a in enumerate(parts):
					    if a == accountId:
					        idx = i
					win_lose = participants[idx]['stats']['win']
					
					if win_lose == True:
						game_win = "승리"
					elif win_lose == False:
						game_win = "패배"

					spell1Id = participants[idx]['spell1Id']
					spell2Id = participants[idx]['spell2Id']
					perkPrimaryStyle = participants[idx]['stats']['perkPrimaryStyle']
					perkSubStyle = participants[idx]['stats']['perkSubStyle']
					item0 = participants[idx]['stats']['item0']
					item1 = participants[idx]['stats']['item1']
					item2 = participants[idx]['stats']['item2']
					item3 = participants[idx]['stats']['item3']
					item4 = participants[idx]['stats']['item4']
					item5 = participants[idx]['stats']['item5']
					item6 = participants[idx]['stats']['item6']
					item = [item0,item1,item2,item3,item4,item5,item6]
					visionWards = participants[idx]['stats']['visionWardsBoughtInGame']
					visionScore = participants[idx]['stats']['visionScore']
					kills = participants[idx]['stats']['kills']
					deaths = participants[idx]['stats']['deaths']
					assists = participants[idx]['stats']['assists']
					kda = round((kills + assists)/deaths,2) if deaths != 0 else 'perfect'
					penta = participants[idx]['stats']['pentaKills']
					quadra = participants[idx]['stats']['quadraKills']
					triple = participants[idx]['stats']['tripleKills']
					double = participants[idx]['stats']['doubleKills']
					if penta >= 1:
					    killmsg = '펜타킬'
					elif quadra >=1:
					    killmsg = '쿼드라킬'
					elif triple >=1:
					    killmsg = '트리플킬'
					elif double >=1: 
					    killmsg = '더블킬'
					else:
					    killmsg = 0
					totalDamageDealt = participants[idx]['stats']['totalDamageDealt']
					totalMinionsKilled = participants[idx]['stats']['totalMinionsKilled']
					champLevel = participants[idx]['stats']['champLevel']
					
					# pname=[]
					# for p in parts:
					# 	sql = "select * from usersinfo where accountId=(%s)"
					# 	curs.execute(sql,(p,))
					# 	data = curs.fetchone()
					# 	pname.append(data[7])
					# print(pname)

					p1champ = participants[0]['championId']
					p2champ = participants[1]['championId']
					p3champ = participants[2]['championId']
					p4champ = participants[3]['championId']
					p5champ = participants[4]['championId']
					p6champ = participants[5]['championId']
					p7champ = participants[6]['championId']
					p8champ = participants[7]['championId']
					p9champ = participants[8]['championId']
					p10champ = participants[9]['championId']
					pchamp_tmp = [p1champ,p2champ,p3champ,p4champ,p5champ,p6champ,p7champ,p8champ,p9champ,p10champ]			
					#print(p1champ)
					gameCreation = matchinfo[2]
					gameDurationH = math.trunc(matchinfo[3]/60)
					gameDurationS = matchinfo[3]%60
					queueId = matchinfo[4]
					mapId = matchinfo[5]
					seasonId = matchinfo[6]
					gameVersion = matchinfo[7][:5]
					gameMode = matchinfo[8]
					gameType = matchinfo[9]
					info = [gameCreation,gameDurationH,gameDurationS,queueId,mapId,seasonId,gameVersion,gameMode,gameType]
					#print(info)
					#print(teams[0]['bans'])
					bans_tmp = [teams[i]['bans'][j]['championId'] for i in range(2) for j in range(5)]
					#print(bans_tmp)
					pchamp=[]
					bans=[]
					
					for c,b in zip(pchamp_tmp,bans_tmp):
						for i,r in enumerate(champ_df['champion']):
							if c == r:
								pchamp.append(champ_df['name'][i])
							if b == r:
								bans.append(champ_df['name'][i])
					#print(bans)
					part = [spell1Id,spell2Id,perkPrimaryStyle,perkSubStyle,item,visionWards,visionScore,kills,deaths,assists,kda,killmsg,totalDamageDealt,totalMinionsKilled,champLevel,pchamp,bans,game_win] #,lane]
					
					match = matchlists_comp[match_num] + info + part  #+ bans
					c+=1
					match_num += 1

					columns_info = ['gameCreation','gameDurationH','gameDurationS','queueId','mapId','seasonId','gameVersion','gameMode','gameType']
					#columns_bans = ['ban1','ban2','ban3','ban4','ban5','ban6','ban7','ban8','ban9','ban10']
					columns_part = ['spell1Id','spell2Id','perkPrimaryStyle','perkSubStyle','item','visionWardsBoughtInGame','visionScore','kills','deaths','assists','kda','killmsg','totalDamageDealt','totalMinionsKilled','champLevel','pchamp','bans','game_win'] #,'lane']
					columns = columns_matchlist + columns_info  + columns_part  #+ columns_bans
					print("dataframe")
					#print(game_win)
					df = pd.DataFrame(columns=columns)
					df.loc[0]=match

					# each match is appended to the DataFrame one row at a time
					# json_records = df.reset_index().to_json(orient ='records')
					# data = [] 
					# data = json.loads(json_records) 
					data = df_to_json(df)
					# build a list of JSON records so the 20 match JSONs are collected together
					
					data_list.append(data)
				
				matchinfo = curs.fetchone()
			print("all complete")	
			#print(data_list)
			#print(info_data)
			context = {'match_list': data_list, 'info' : info_data}

			curs.close()
			conn.close()
		except Exception as e:
			conn.rollback()
			print("Failed selecting in matchinfo:", e)
			context = {'context' : 'NULL'}
	
	else:
		err = "소환사명 또는 챔피언명을 입력해주세요"  # "Please enter a summoner name or champion name"
		context = {'err' : err}

	return render(request, 'kugg/info_list.html', context)
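A minimal sketch of the df_to_json helper called above, inferred from the commented-out reset_index().to_json(...) lines in the same view; the project's real implementation is not shown, so treat this as an assumption:

import json

def df_to_json(df):
	# Hypothetical helper (assumption): serialize the one-row DataFrame to a
	# list of record dicts, mirroring the commented-out lines in the view.
	json_records = df.reset_index().to_json(orient='records')
	return json.loads(json_records)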
Ejemplo n.º 54
0
    def ProcessAccessRequest(self, ReqUUID):
        """Process access request
        
        :param ReqUUID: ReqUUID in database table AccessQueue
        
        """
        try:
            logger.debug('Start ProcessAccessRequest')
            logger.debug('ReqUUID: %s' % ReqUUID)
            connection.close()  # Fix (2006, 'MySQL server has gone away')
            AccessQueue_obj = AccessQueue.objects.get(ReqUUID=ReqUUID)
            process_name = multiprocessing.current_process().name
            logger.debug('process_name: %s' % process_name)
            process_pid = multiprocessing.current_process().pid
            logger.debug('process_pid: %s' % process_pid)

            AccessQueue_obj.Status = 5
            AccessQueue_obj.save()

            if AccessQueue_obj.ReqType in (1, 3, 4, 5):
                event_info = 'Start Generate DIP Process for ObjectIdentifierValue: %s, ReqUUID: %s' % (
                    AccessQueue_obj.ObjectIdentifierValue,
                    AccessQueue_obj.ReqUUID)
                logger.info(event_info)
                ESSPGM.Events().create('1202', AccessQueue_obj.ReqPurpose,
                                       'ESSArch Access', ProcVersion, '0',
                                       event_info, 2,
                                       AccessQueue_obj.ObjectIdentifierValue)
            elif AccessQueue_obj.ReqType == 2:
                event_info = 'Start quickverify storageMediumID Process for storageMediumID: %s, ReqUUID: %s' % (
                    AccessQueue_obj.storageMediumID, AccessQueue_obj.ReqUUID)
                logger.info(event_info)
                ESSPGM.Events().create('2202', AccessQueue_obj.ReqPurpose,
                                       'ESSArch Access', ProcVersion, '0',
                                       event_info, 2,
                                       AccessQueue_obj.ObjectIdentifierValue)

            StorageMethodRead_obj = StorageMethodRead()
            StorageMethodRead_obj.logger = logger
            StorageMethodRead_obj.AccessQueue_obj = AccessQueue_obj

            if AccessQueue_obj.ReqType == 1:
                StorageMethodRead_obj.get_object_to_read()
                StorageMethodRead_obj.add_to_ioqueue()
                StorageMethodRead_obj.apply_ios_to_read()
                StorageMethodRead_obj.wait_for_all_reads()
                StorageMethodRead_obj.ip_unpack()
                StorageMethodRead_obj.ip_validate()
            elif AccessQueue_obj.ReqType == 3:
                StorageMethodRead_obj.get_object_to_read()
                StorageMethodRead_obj.add_to_ioqueue()
                StorageMethodRead_obj.apply_ios_to_read()
                StorageMethodRead_obj.wait_for_all_reads()
            elif AccessQueue_obj.ReqType in [4, 5]:
                StorageMethodRead_obj.get_object_to_read()
                StorageMethodRead_obj.add_to_ioqueue()
                StorageMethodRead_obj.apply_ios_to_read()
                StorageMethodRead_obj.wait_for_all_reads()
                StorageMethodRead_obj.ip_unpack()
                StorageMethodRead_obj.ip_validate()
                StorageMethodRead_obj.delete_retrieved_ios()
            elif AccessQueue_obj.ReqType == 2:
                StorageMethodRead_obj.get_objects_to_verify()
                StorageMethodRead_obj.add_to_ioqueue()
                StorageMethodRead_obj.apply_ios_to_read()
                StorageMethodRead_obj.wait_for_all_reads()
                StorageMethodRead_obj.delete_retrieved_ios()

        except AccessError as e:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            AccessQueue_obj.refresh_from_db()
            if AccessQueue_obj.ReqType in (1, 3, 4, 5):
                event_info = 'Problem to Generate DIP for ObjectIdentifierValue: %s, ReqUUID: %s, error: %s, line: %s' % (
                    AccessQueue_obj.ObjectIdentifierValue,
                    AccessQueue_obj.ReqUUID, e, exc_traceback.tb_lineno)
                logger.error(event_info)
                ESSPGM.Events().create('1203', AccessQueue_obj.ReqPurpose,
                                       'ESSArch Access', ProcVersion, '1',
                                       event_info, 2,
                                       AccessQueue_obj.ObjectIdentifierValue)
            elif AccessQueue_obj.ReqType == 2:
                event_info = 'Problem to quickverify storageMediumID: %s, ReqUUID: %s, error: %s line: %s' % (
                    AccessQueue_obj.storageMediumID, AccessQueue_obj.ReqUUID,
                    e, exc_traceback.tb_lineno)
                logger.error(event_info)
                ESSPGM.Events().create(
                    '2203',
                    AccessQueue_obj.ReqPurpose,
                    'ESSArch Access',
                    ProcVersion,
                    '1',
                    event_info,
                    2,
                    storageMediumID=AccessQueue_obj.storageMediumID)
            AccessQueue_obj.Status = 100
            AccessQueue_obj.save(update_fields=['Status'])
            #raise e
        except Exception as e:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            msg = 'Unknown error, error: %s trace: %s' % (
                e, repr(traceback.format_tb(exc_traceback)))
            logger.error(msg)
            AccessQueue_obj.refresh_from_db()
            msg = 'Unknown error with access ReqUUID: %s, error: %s trace: %s' % (
                AccessQueue_obj.ReqUUID, e,
                repr(traceback.format_tb(exc_traceback)))
            logger.error(msg)
            AccessQueue_obj.Status = 100
            AccessQueue_obj.save(update_fields=['Status'])
            #raise e
        except:
            msg = 'Unexpected error: %s %s' % (sys.exc_info()[0],
                                               sys.exc_info()[1])
            logger.error(msg)
            print(msg)
            #raise
        else:
            if AccessQueue_obj.ReqType in (1, 3, 4):
                event_info = 'Success to Generate DIP for ObjectIdentifierValue: %s, ReqUUID: %s' % (
                    AccessQueue_obj.ObjectIdentifierValue,
                    AccessQueue_obj.ReqUUID)
                logger.info(event_info)
                ESSPGM.Events().create('1203', AccessQueue_obj.ReqPurpose,
                                       'ESSArch Access', ProcVersion, '0',
                                       event_info, 2,
                                       AccessQueue_obj.ObjectIdentifierValue)
            elif AccessQueue_obj.ReqType == 5:
                event_info = 'Success to get AIP to ControlArea for ObjectIdentifierValue: %s, ReqUUID: %s' % (
                    AccessQueue_obj.ObjectIdentifierValue,
                    AccessQueue_obj.ReqUUID)
                logger.info(event_info)
                ESSPGM.Events().create('1203', AccessQueue_obj.ReqPurpose,
                                       'ESSArch Access', ProcVersion, '0',
                                       event_info, 2,
                                       AccessQueue_obj.ObjectIdentifierValue)
                # Update IP in ArchiveObject DBtable
                ArchiveObject_upd = ArchiveObject.objects.get(
                    ObjectIdentifierValue=AccessQueue_obj.ObjectIdentifierValue
                )
                ArchiveObject_upd.StatusActivity = 7
                # Commit DB updates
                ArchiveObject_upd.save(update_fields=['StatusActivity'])
            elif AccessQueue_obj.ReqType == 2:
                event_info = 'Success to quickverify storageMediumID: %s, ReqUUID: %s' % (
                    AccessQueue_obj.storageMediumID, AccessQueue_obj.ReqUUID)
                logger.info(event_info)
                ESSPGM.Events().create(
                    '2203',
                    AccessQueue_obj.ReqPurpose,
                    'ESSArch Access',
                    ProcVersion,
                    '0',
                    event_info,
                    2,
                    storageMediumID=AccessQueue_obj.storageMediumID)
            AccessQueue_obj.Status = 20
            AccessQueue_obj.save(update_fields=['Status'])
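A minimal dispatch sketch, assuming ProcessAccessRequest runs in a separate worker process (the method logs multiprocessing.current_process() and closes the inherited DB connection for exactly that reason); the caller names below are hypothetical:

import multiprocessing

# 'access_service' and 'req_uuid' are stand-ins for the real queue consumer.
worker = multiprocessing.Process(
    target=access_service.ProcessAccessRequest,
    args=(req_uuid,),
    name='AccessWorker-%s' % req_uuid,
)
worker.start()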
Ejemplo n.º 55
0
def importActivitiesFromCSV(url):

    file_name = url.split('/')[-1]
    u = urllib2.urlopen(url)
    f = open(file_name, 'wb')
    meta = u.info()
    file_size = int(meta.getheaders("Content-Length")[0])
    print "Downloading: %s Bytes: %s" % (file_name, file_size)

    file_size_dl = 0
    block_sz = 8192
    while True:
        buffer = u.read(block_sz)
        if not buffer:
            break

        file_size_dl += len(buffer)
        f.write(buffer)
        status = r"%10d  [%3.2f%%]" % (file_size_dl,
                                       file_size_dl * 100. / file_size)
        status = status + chr(8) * (len(status) + 1)
        print status,

    f.close()
    f = open(file_name, 'rb')
    csvDict = csv.DictReader(f)
    print csvDict
    print f
    print csvDict.fieldnames
    print 'File opened.'
    csvDict.fieldnames = [
        field.strip().lower() for field in csvDict.fieldnames
    ]
    print 'Field names converted: %s' % str(csvDict.fieldnames)
    count = 0
    existingCount = 0
    errorCount = 0
    print 'Starting line iteration.'
    for line in csvDict:
        # unicode_line = dict([(k.encode('utf8'), v.encode('utf8')) for k, v in line.items()])
        newTime = oracleTimeToDateTime(line['start_time_local'])
        line['start_time_local'] = newTime
        newActivity = NikeSportActivity(**line)
        # objects_to_bulk_create.append(newActivity)
        # if len(objects_to_bulk_create) >= 10:
        # 	try:
        # 		NikeSportActivity.objects.bulk_create(objects_to_bulk_create)
        # 		count += len(objects_to_bulk_create)
        # 		objects_to_bulk_create = []
        # 	except IntegrityError, e:
        # 		existingCount += len(objects_to_bulk_create)
        # 		objects_to_bulk_create = []
        # 		print 'IntegrityError during bulk save: %s' % str(e)
        # 	except DatabaseError, e:
        # 		errorCount += len(objects_to_bulk_create)
        # 		objects_to_bulk_create = []
        # 		print 'DatabaseError during bulk save: %s' % str(e)
        # 	print '%d rows completed. %d existing rows skipped. Error count: %d' % (count, existingCount, errorCount)
        try:
            newActivity.save()
            count += 1
        except IntegrityError, e:
            # print e
            connection.close()
            existingCount += 1
        except DatabaseError, e:
            # print e
            connection.close()
            errorCount += 1
Ejemplo n.º 56
0
def close_connection(**kwargs):
    connection.close()
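A minimal usage sketch, assuming the handler above is meant to be attached to a signal so stale connections are dropped when background work finishes; the signal here is purely illustrative:

from django.dispatch import Signal

worker_finished = Signal()               # hypothetical signal for illustration
worker_finished.connect(close_connection)
worker_finished.send(sender=None)        # invokes close_connection(**kwargs) above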
Ejemplo n.º 57
0
    def maybeCloseDatabaseConnections(self):
        """Close database connections if their use is not permitted."""
        if self.database_use_possible and not self.database_use_permitted:
            from django.db import connection

            connection.close()
Ejemplo n.º 58
0
 def generator(cursor):
     for row in cursor:
         yield Decimal(row[0])
     cursor.close()
     connection.close()
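A minimal usage sketch, assuming the generator above wraps a raw cursor obtained from django.db.connection; the query and table are hypothetical:

from django.db import connection

cursor = connection.cursor()
cursor.execute("SELECT amount FROM payments")    # hypothetical query
total = sum(generator(cursor))                   # cursor and connection are closed once exhausted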
Ejemplo n.º 59
0
 def disconnect(self, close_code):
     # Leave room group
     if self.room_group_name:
         connection.close()
         async_to_sync(self.channel_layer.group_discard)(
             self.room_group_name, self.channel_name)
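A hedged sketch of the connect() method this disconnect() presumably pairs with, assuming a Channels WebsocketConsumer; the group-name scheme is an assumption:

 def connect(self):
     # Hypothetical counterpart (assumption): join the room group that the
     # disconnect() above later leaves.
     self.room_group_name = 'chat_%s' % self.scope['url_route']['kwargs']['room_name']
     async_to_sync(self.channel_layer.group_add)(
         self.room_group_name, self.channel_name)
     self.accept()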
Ejemplo n.º 60
0
def importUsersFromCSV(url):

    file_name = url.split('/')[-1]
    u = urllib2.urlopen(url)
    f = open(file_name, 'wb')
    meta = u.info()
    file_size = int(meta.getheaders("Content-Length")[0])
    print "Downloading: %s Bytes: %s" % (file_name, file_size)

    file_size_dl = 0
    block_sz = 8192
    while True:
        buffer = u.read(block_sz)
        if not buffer:
            break

        file_size_dl += len(buffer)
        f.write(buffer)
        status = r"%10d  [%3.2f%%]" % (file_size_dl,
                                       file_size_dl * 100. / file_size)
        status = status + chr(8) * (len(status) + 1)
        print status,

    f.close()

    f = open(file_name, 'rb')
    csvDict = csv.DictReader(f)
    print csvDict
    print f
    print csvDict.fieldnames
    print 'File opened.'
    csvDict.fieldnames = [
        field.strip().lower() for field in csvDict.fieldnames
    ]
    print 'Field names converted: %s' % str(csvDict.fieldnames)
    count = 0
    existingCount = 0
    errorCount = 0
    print 'Starting line iteration.'
    for line in csvDict:
        if line['year_birthdate'] == '':
            line['year_birthdate'] = '0'
        new_year = int(line['year_birthdate'])
        line['year_birthdate'] = new_year
        if line['height'] == '':
            line['height'] = 0
        line['height'] = float(line['height'])
        if line['weight'] == '':
            line['weight'] = 0
        line['weight'] = float(line['weight'])
        if line['gender'] == '':
            line['gender'] = 0
        elif line['gender'] == '1' or line['gender'] == '2':
            line['gender'] = int(line['gender'])
        newUser = NikeUserSport(**line)
        try:
            newUser.save()
            count += 1
        except IntegrityError:
            connection.close()
            existingCount += 1
        except DatabaseError, e:

            connection.close()
            # print e
            errorCount += 1
        print '%d rows completed. %d existing rows skipped. Error count: %d                      \r' % (
            count, existingCount, errorCount),