Example #1
0
 def testGenerateData(self):
     # Seed the database with 100 demo schools, then run the dumpdata
     # management command over the "schools" app with 2-space indentation.
     for n in range(100):
         school = self._new_school("Demo School %s" % n)
     command = Command()
     target = os.path.join(os.path.dirname(__file__), "test_schools.json")
     filename = os.path.abspath(target)
     datadump = command.handle("schools", indent=2)
Example #2
0
def serialize_to_response(app_labels=None, exclude=None, response=None,
                          format=SMUGGLER_FORMAT, indent=SMUGGLER_INDENT):
    """Run Django's dumpdata for *app_labels* and write the result into
    *response* (a plain-text HttpResponse by default), returning it.

    CommandErrors from dumpdata are re-raised (they carry "user friendly"
    messages); sys.stdout is always restored afterwards.
    """
    # None sentinels instead of mutable [] defaults: a shared default list
    # persists across calls and can leak state if it is ever mutated.
    app_labels = app_labels or []
    exclude = exclude or []
    response = response or HttpResponse(mimetype='text/plain')
    # There's some funky output redirecting going on as Django >= 1.5 writes
    # to a wrapped output stream, instead of just returning the dumped output.
    stream = StringIO()  # this is going to be our stdout
    # We need to fake an OutputWrapper as it's only introduced in Django 1.5
    out = lambda: None
    out.write = lambda s: stream.write(s)  # this seems to be sufficient.
    try:
        # Now make sys.stdout our wrapped StringIO instance and start the dump.
        sys.stdout = out
        dumpdata = DumpData()
        dumpdata.stdout = sys.stdout
        dumpdata.stderr = sys.stderr
        output = dumpdata.handle(*app_labels, **{
            'exclude': exclude,
            'format': format,
            'indent': indent,
            'show_traceback': True,
            'use_natural_keys': True
        })
    except CommandError:
        # We expect and re-raise CommandErrors, these contain "user friendly"
        # error messages.
        raise
    else:
        # Django < 1.5 returns the dump; >= 1.5 wrote it to our fake stdout.
        if output:
            response.write(output)
        else:
            response.write(stream.getvalue())
        return response
    finally:
        # Be nice and cleanup!
        sys.stdout = sys.__stdout__
Example #3
0
def serialize_to_response(app_labels=None,
                          exclude=None,
                          response=None,
                          format=settings.SMUGGLER_FORMAT,
                          indent=settings.SMUGGLER_INDENT):
    """Dump *app_labels* via Django's dumpdata into *response* and return it.

    Raises CommandError built from dumpdata's stderr when the underlying
    command bails out through sys.exit (as Django 1.4's execute() does).
    """
    # None sentinels instead of mutable [] defaults, which are shared
    # between calls and can leak state if mutated.
    app_labels = app_labels or []
    exclude = exclude or []
    response = response or HttpResponse(content_type='text/plain')
    stream = StringIO()
    error_stream = StringIO()
    try:
        dumpdata = DumpData()
        dumpdata.style = no_style()
        dumpdata.execute(
            *app_labels, **{
                'stdout': stream,
                'stderr': error_stream,
                'exclude': exclude,
                'format': format,
                'indent': indent,
                'use_natural_keys': True
            })
    except SystemExit:
        # Django 1.4's implementation of execute catches CommandErrors and
        # then calls sys.exit(1), we circumvent this here.
        errors = error_stream.getvalue().strip().replace('Error: ', '')
        raise CommandError(errors)
    response.write(stream.getvalue())
    return response
Example #4
0
def serialize_to_response(app_labels=None, exclude=None, response=None,
                          format=settings.SMUGGLER_FORMAT,
                          indent=settings.SMUGGLER_INDENT):
    """Dump *app_labels* (with natural foreign/primary keys) via Django's
    dumpdata into *response* and return it.

    Raises CommandError built from dumpdata's stderr when the underlying
    command bails out through sys.exit (as Django 1.4's execute() does).
    """
    # None sentinels instead of mutable [] defaults, which are shared
    # between calls and can leak state if mutated.
    app_labels = app_labels or []
    exclude = exclude or []
    response = response or HttpResponse(content_type='text/plain')
    stream = StringIO()
    error_stream = StringIO()
    try:
        dumpdata = DumpData()
        dumpdata.style = no_style()
        dumpdata.execute(*app_labels, **{
            'stdout': stream,
            'stderr': error_stream,
            'exclude': exclude,
            'format': format,
            'indent': indent,
            'use_natural_keys': True,
            'use_natural_foreign_keys': True,
            'use_natural_primary_keys': True
        })
    except SystemExit:
        # Django 1.4's implementation of execute catches CommandErrors and
        # then calls sys.exit(1), we circumvent this here.
        errors = error_stream.getvalue().strip().replace('Error: ', '')
        raise CommandError(errors)
    response.write(stream.getvalue())
    return response
Example #5
0
def dumpdata(database=None, fixture_path=None):
    """ @brief: Dumps data from \a database to \a fixture_path.
        @since: 2014-05-29
        @author: Jivan
        If fixture_path isn't provided, dumps to stdout.
        @raises Exception: when \a database is not given.
    """
    if database is None: raise Exception('dblabel is a required argument')

    ddc = DumpDataCommand()

    # If fixture_path has been specified, steal output from stdout.
    if fixture_path:
        from cStringIO import StringIO
        old_stdout = sys.stdout
        sys.stdout = mystdout = StringIO()

    # Permissions and content types are recreated by syncdb; excluding them
    # avoids duplicate-key problems on load.
    exclude = ['auth.permission', 'contenttypes']
    try:
        ddc.execute(format='json',
                    natural=True,
                    exclude=exclude,
                    indent=4,
                    database=database)
    finally:
        # Always restore stdout, even when the dump raises; otherwise an
        # exception would leave sys.stdout pointing at our StringIO buffer.
        if fixture_path:
            sys.stdout = old_stdout

    # If fixture_path has been specified, dump the stolen output into it.
    if fixture_path:
        with open(fixture_path, 'w') as f:
            f.write(mystdout.getvalue())
            f.write('\n')
            mystdout.close()
def dumpdata(database=None, fixture_path=None):
    """ @brief: Dumps data from \a database to \a fixture_path.
        @since: 2014-05-29
        @author: Jivan
        If fixture_path isn't provided, dumps to stdout.
        @raises Exception: when \a database is not given.
    """
    if database is None: raise Exception('dblabel is a required argument')

    ddc = DumpDataCommand()

    # If fixture_path has been specified, steal output from stdout.
    if fixture_path:
        from cStringIO import StringIO
        old_stdout = sys.stdout
        sys.stdout = mystdout = StringIO()

    # Permissions and content types are recreated by syncdb; excluding them
    # avoids duplicate-key problems on load.
    exclude = ['auth.permission', 'contenttypes']
    try:
        ddc.execute(format='json', natural=True, exclude=exclude, indent=4, database=database)
    finally:
        # Always restore stdout, even when the dump raises; otherwise an
        # exception would leave sys.stdout pointing at our StringIO buffer.
        if fixture_path:
            sys.stdout = old_stdout

    # If fixture_path has been specified, dump the stolen output into it.
    if fixture_path:
        with open(fixture_path, 'w') as f:
            f.write(mystdout.getvalue())
            f.write('\n')
            mystdout.close()
 def testGenerateNetFixtures(self):
     """ This isn't actually a test.  It just takes advantage
         of the test harness to spam a bunch of messages to the
         nigeria app and spit out the data in a format that can
         be sucked into a fixture.  It should be moved to some
         data generator at some point, but is being left here
         for laziness sake """
     # this is the number of net reports that will be generated
     count = 0

     # the sender will always be the same, for now
     phone = "55555"

     # likelihood that the reported "actual" count matches the expected one
     expected_actual_match_percent = .8

     # allow specifying the minimum and maximum dates for message generation
     min_date = datetime(2009, 4, 1)
     max_date = datetime(2009, 4, 30)
     # time.mktime returns floats; random.randint requires integer bounds
     min_time = int(time.mktime(min_date.timetuple()))
     max_time = int(time.mktime(max_date.timetuple()))

     # these are the locations that will be chosen.  The actual
     # location will be a distribution point under one of these
     # wards
     wards = [200101, 200102, 200103, 200104, 200105, 200106, 200107, 200108, 200109, 200110, 200201]
     all_net_strings = []
     for i in range(count):
         # this first part generates a net form at a random DP
         date = datetime.fromtimestamp(random.randint(min_time, max_time))
         ward = Location.objects.get(code=random.choice(wards))
         dp = random.choice(ward.children.all())
         distributed = random.randint(50, 500)
         expected = random.randint(0, 2000)
         # create an actual amount based on the likelihood of match
         if random.random() < expected_actual_match_percent:
             actual = expected
         else:
             actual = random.randint(0, 2000)
         # integer (floor) division: randint's upper bound must be an int
         discrepancy = random.randint(0, distributed // 5)
         net_string = "%s@%s > llin nets %s %s %s %s %s" % (phone, date.strftime("%Y%m%d%H%M"), dp.code, distributed, expected, actual, discrepancy)
         all_net_strings.append(net_string)
         # the second part generates a net card form at a random MT
         date = datetime.fromtimestamp(random.randint(min_time, max_time))
         ward = Location.objects.get(code=random.choice(wards))
         dp = random.choice(ward.children.all())
         mt = random.choice(dp.children.all())
         settlements = random.randint(3, 50)
         people = random.randint(50, 600)
         coupons = random.randint(50, 600)
         net_card_string = "%s@%s > llin net cards %s %s %s %s" % (phone, date.strftime("%Y%m%d%H%M"), mt.code, settlements, people, coupons)
         all_net_strings.append(net_card_string)

     script = "\n".join(all_net_strings)
     self.runScript(script)
     dumpdata = Command()
     filename = os.path.abspath(os.path.join(os.path.dirname(__file__), "fixtures/test_net_data.json"))
     options = {"indent": 2}
     datadump = dumpdata.handle("bednets", **options)
Example #8
0
 def testGenerateNetFixtures(self):
     """ This isn't actually a test.  It just takes advantage
         of the test harness to spam a bunch of messages to the
         nigeria app and spit out the data in a format that can
         be sucked into a fixture.  It should be moved to some
         data generator at some point, but is being left here
         for laziness sake """
     # this is the number of net reports that will be generated
     count = 0

     # the sender will always be the same, for now
     phone = "55555"

     # likelihood that the reported "actual" count matches the expected one
     expected_actual_match_percent = .8

     # allow specifying the minimum and maximum dates for message generation
     min_date = datetime(2009, 4, 1)
     max_date = datetime(2009, 4, 30)
     # time.mktime returns floats; random.randint requires integer bounds
     min_time = int(time.mktime(min_date.timetuple()))
     max_time = int(time.mktime(max_date.timetuple()))

     # these are the locations that will be chosen.  The actual
     # location will be a distribution point under one of these
     # wards
     wards = [200101, 200102, 200103, 200104, 200105, 200106, 200107, 200108, 200109, 200110, 200201]
     all_net_strings = []
     for i in range(count):
         # this first part generates a net form at a random DP
         date = datetime.fromtimestamp(random.randint(min_time, max_time))
         ward = Location.objects.get(code=random.choice(wards))
         dp = random.choice(ward.children.all())
         distributed = random.randint(50, 500)
         expected = random.randint(0, 2000)
         # create an actual amount based on the likelihood of match
         if random.random() < expected_actual_match_percent:
             actual = expected
         else:
             actual = random.randint(0, 2000)
         # integer (floor) division: randint's upper bound must be an int
         discrepancy = random.randint(0, distributed // 5)
         net_string = "%s@%s > llin nets %s %s %s %s %s" % (phone, date.strftime("%Y%m%d%H%M"), dp.code, distributed, expected, actual, discrepancy)
         all_net_strings.append(net_string)
         # the second part generates a net card form at a random MT
         date = datetime.fromtimestamp(random.randint(min_time, max_time))
         ward = Location.objects.get(code=random.choice(wards))
         dp = random.choice(ward.children.all())
         mt = random.choice(dp.children.all())
         settlements = random.randint(3, 50)
         people = random.randint(50, 600)
         coupons = random.randint(50, 600)
         net_card_string = "%s@%s > llin net cards %s %s %s %s" % (phone, date.strftime("%Y%m%d%H%M"), mt.code, settlements, people, coupons)
         all_net_strings.append(net_card_string)

     script = "\n".join(all_net_strings)
     self.runScript(script)
     dumpdata = Command()
     filename = os.path.abspath(os.path.join(os.path.dirname(__file__), "fixtures/test_net_data.json"))
     options = {"indent": 2}
     datadump = dumpdata.handle("bednets", **options)
Example #9
0
 def view(request):
     # Run dumpdata for `label`, capturing its stdout into a buffer,
     # and hand the captured JSON back as the HTTP response body.
     command = Command()
     command.stdout = StringIO()
     command.handle(label, format="json", exclude=[])
     payload = command.stdout.getvalue()
     return HttpResponse(content=payload, content_type="application/json")
Example #10
0
    def handle(self, *args, **options):
        """Dispatch the wousoctl utility command.

        Acts on the first truthy option among: check_setup, setup
        (optionally with noinput), save, load, reset, updatedisplay;
        prints the command help when none is given.
        """
        # App/model labels serialized by --save and matching --load fixtures.
        save_load_labels = ['magic', 'scoring.coin', 'scoring.formula', 'config', 'pages', 'user.race', 'user.playergroup']
        # Apps passed to sqlreset by --reset.
        reset_labels = ['magic', 'scoring', 'config', 'pages']
        if options['check_setup']:
            ok = True
            # Check scoring
            if not check_setup():
                self.stdout.write('FAIL: Scoring module not setup.')
                ok = False

            if ok:
                self.stdout.write('OK.\n')

        elif options['setup']:
            if options['noinput']:
                # YOLO: call_command does not work if I call the syncdb command
                # with the 'all' parameter, so we use subprocess
                subprocess.call(['python', 'manage.py', 'syncdb', '--all',
                                 '--noinput'])
            else:
                subprocess.call(['python', 'manage.py', 'syncdb', '--all'])
            # Call migrations as fake because some conflicts after moving to
            # South
            call_command('migrate', fake=True)
            # Call migrations here, because we want fixtures to be loaded
            call_command('migrate')

            self.stdout.write('Setting up scoring...')
            setup_scoring()
            self.stdout.write('\n')
            self.stdout.write('Setting up user groups...')
            setup_user_groups()
            setup_staff_groups()
            self.stdout.write('\n')
            self.stdout.write('Setting up magic...')
            setup_magic()
            self.stdout.write('\n')
            self.stdout.write('Done.\n')

        elif options['save']:
            # Dump the save/load labels to the file named by --save.
            c = DumpdataCommand()
            with open(options['save'], 'w') as fout:
                data = c.handle(*save_load_labels, **options)
                fout.write(data)
            self.stdout.write('Saved!\n')

        elif options['load']:
            call_command('loaddata', options['load'])
            self.stdout.write('Loaded!\n')

        elif options['reset']:
            call_command('sqlreset', *reset_labels)

        elif options['updatedisplay']:
            update_all_display_names()

        else:
            self.print_help('wousoctl', '')
Example #11
0
 def _dumpdata(self):
     """Serialize the locations/reporters/schools/blaster apps to a JSON
     fixture written next to this file as test_schools.json."""
     dumpdata = Command()
     # Absolute path keeps the output location stable regardless of cwd.
     filename = os.path.abspath(os.path.join(os.path.dirname(__file__),"test_schools.json"))
     options = { "indent" : 2 }
     # Command.handle returns the serialized data as a string.
     datadump = dumpdata.handle("locations", "reporters", "schools","blaster", **options)
     file = open(filename, "w")
     file.write(datadump)
     file.close()
     print "=== Successfully wrote fixtures to %s ===" % filename
Example #12
0
 def _dumpdata(self):
     """Serialize the locations/reporters/schools/blaster apps to a JSON
     fixture written next to this file as test_schools.json."""
     dumpdata = Command()
     # Absolute path keeps the output location stable regardless of cwd.
     filename = os.path.abspath(
         os.path.join(os.path.dirname(__file__), "test_schools.json"))
     options = {"indent": 2}
     # Command.handle returns the serialized data as a string.
     datadump = dumpdata.handle("locations", "reporters", "schools",
                                "blaster", **options)
     file = open(filename, "w")
     file.write(datadump)
     file.close()
     print "=== Successfully wrote fixtures to %s ===" % filename
Example #13
0
    def handle(self, *args, **options):
        """Dispatch the wousoctl utility command.

        Acts on the first truthy option among: check_setup, setup
        (optionally with noinput), save, load, reset, updatedisplay;
        prints the command help when none is given.
        """
        # App/model labels serialized by --save and matching --load fixtures.
        save_load_labels = [
            'magic', 'scoring.coin', 'scoring.formula', 'config', 'pages',
            'user.race', 'user.playergroup'
        ]
        # Apps passed to sqlreset by --reset.
        reset_labels = ['magic', 'scoring', 'config', 'pages']
        if options['check_setup']:
            ok = True
            # Check scoring
            if not check_setup():
                self.stdout.write('FAIL: Scoring module not setup.')
                ok = False

            if ok:
                self.stdout.write('OK.\n')

        elif options['setup']:
            # interactive=False mirrors --noinput for unattended setup.
            if options['noinput']:
                call_command('syncdb', interactive=False)
            else:
                call_command('syncdb')
            call_command('migrate')

            self.stdout.write('Setting up scoring...')
            setup_scoring()
            self.stdout.write('\n')
            self.stdout.write('Setting up user groups...')
            setup_user_groups()
            setup_staff_groups()
            self.stdout.write('\n')
            self.stdout.write('Setting up magic...')
            setup_magic()
            self.stdout.write('\n')
            self.stdout.write('Done.\n')

        elif options['save']:
            # Dump the save/load labels to the file named by --save.
            c = DumpdataCommand()
            with open(options['save'], 'w') as fout:
                data = c.handle(*save_load_labels, **options)
                fout.write(data)
            self.stdout.write('Saved!\n')

        elif options['load']:
            call_command('loaddata', options['load'])
            self.stdout.write('Loaded!\n')

        elif options['reset']:
            call_command('sqlreset', *reset_labels)

        elif options['updatedisplay']:
            update_all_display_names()

        else:
            self.print_help('wousoctl', '')
Example #14
0
 def testPropertyDataDump(self):
     """Round-trip each test value through a Property row and the dumpdata
     command, asserting the dumped JSON carries the stringified value."""
     for i, value in enumerate(self.testing_data):
         prop = Property.objects.create(name='name %s' % i,
                                        source='foo',
                                        value=value)
         dumpdata = Command()
         jsondata = dumpdata.handle('mbdb')
         data = json.loads(jsondata)
         value_data = data[0]['fields']['value']
         # dump data will always dump the pickled data stringified
         self.assertEqual(unicode(value), value_data)
         # Remove the row so the next iteration dumps exactly one object.
         prop.delete()
Example #15
0
    def testFixture(self):
        """Tests that values can be serialized to a fixture.

        XXX BROKEN, see django http://code.djangoproject.com/ticket/9522

        """
        for value in self.testing_data:
            model_test = TestingModel(pickle_field=value)
            model_test.save()
        dumpdata = Dumpdata()
        # Named json_output so it does not shadow the stdlib `json` module.
        json_output = dumpdata.handle('mbdb')
Example #16
0
File: tests.py Project: gerv/elmo
 def testPropertyDataDump(self):
     """Round-trip each test value through a Property row and the dumpdata
     command, asserting the dumped JSON carries the stringified value."""
     for i, value in enumerate(self.testing_data):
         prop = Property.objects.create(name='name %s' % i,
                                        source='foo',
                                        value=value)
         dumpdata = Command()
         jsondata = dumpdata.handle('mbdb')
         data = json.loads(jsondata)
         value_data = data[0]['fields']['value']
         # dump data will always dump the pickled data stringified
         self.assertEqual(unicode(value), value_data)
         # Remove the row so the next iteration dumps exactly one object.
         prop.delete()
Example #17
0
 def testPropertyDataDump(self):
     """Round-trip each test value through a Property row and the dumpdata
     command, priming handle() with the option defaults it cannot pick up
     on its own, and assert the dumped JSON carries the stringified value."""
     for i, value in enumerate(self.testing_data):
         prop = Property.objects.create(name="name %s" % i, source="foo", value=value)
         dumpdata = Command()
         # when calling handle() directly it's unable to pick up defaults
         # in Command.option_list so we have to pick that up manually
         defaults = dict((x.dest, x.default) for x in Command.option_list)
         jsondata = dumpdata.handle("mbdb", **defaults)
         data = json.loads(jsondata)
         value_data = data[0]["fields"]["value"]
         # dump data will always dump the pickled data stringified
         self.assertEqual(unicode(value), value_data)
         # Remove the row so the next iteration dumps exactly one object.
         prop.delete()
Example #18
0
    def handle(self, *args, **options):
        """Dispatch the wousoctl utility command.

        Acts on the first truthy option among: check_setup, setup
        (optionally with noinput), save, load, reset, updatedisplay;
        prints the command help when none is given.
        """
        # App/model labels serialized by --save and matching --load fixtures.
        save_load_labels = ['magic', 'scoring.coin', 'scoring.formula', 'config', 'pages', 'user.race', 'user.playergroup']
        # Apps passed to sqlreset by --reset.
        reset_labels = ['magic', 'scoring', 'config', 'pages']
        if options['check_setup']:
            ok = True
            # Check scoring
            if not check_setup():
                self.stdout.write('FAIL: Scoring module not setup.')
                ok = False

            if ok:
                self.stdout.write('OK.\n')

        elif options['setup']:
            # interactive=False mirrors --noinput for unattended setup.
            if options['noinput']:
                call_command('syncdb', interactive=False)
            else:
                call_command('syncdb')
            call_command('migrate')

            self.stdout.write('Setting up scoring...')
            setup_scoring()
            self.stdout.write('\n')
            self.stdout.write('Setting up user groups...')
            setup_user_groups()
            setup_staff_groups()
            self.stdout.write('\n')
            self.stdout.write('Setting up magic...')
            setup_magic()
            self.stdout.write('\n')
            self.stdout.write('Done.\n')

        elif options['save']:
            # Dump the save/load labels to the file named by --save.
            c = DumpdataCommand()
            with open(options['save'], 'w') as fout:
                data = c.handle(*save_load_labels, **options)
                fout.write(data)
            self.stdout.write('Saved!\n')

        elif options['load']:
            call_command('loaddata', options['load'])
            self.stdout.write('Loaded!\n')

        elif options['reset']:
            call_command('sqlreset', *reset_labels)

        elif options['updatedisplay']:
            update_all_display_names()

        else:
            self.print_help('wousoctl', '')
Example #19
0
 def testPropertyDataDump(self):
     """Round-trip each test value through a Property row and the dumpdata
     command, priming handle() with the option defaults it cannot pick up
     on its own, and assert the dumped JSON carries the stringified value."""
     for i, value in enumerate(self.testing_data):
         prop = Property.objects.create(name='name %s' % i,
                                        source='foo',
                                        value=value)
         dumpdata = Command()
         # when calling handle() directly it's unable to pick up defaults
         # in Command.option_list so we have to pick that up manually
         defaults = dict((x.dest, x.default) for x in Command.option_list)
         jsondata = dumpdata.handle('mbdb', **defaults)
         data = json.loads(jsondata)
         value_data = data[0]['fields']['value']
         # dump data will always dump the pickled data stringified
         self.assertEqual(unicode(value), value_data)
         # Remove the row so the next iteration dumps exactly one object.
         prop.delete()
    def dump_tenant_data(self, schema_name=None):
        """Run dumpdata against tenant schemas.

        With *schema_name*, dump only that tenant; otherwise iterate every
        non-public tenant schema, raising CommandError when none exist.
        """
        dumpdb_command = DumpDataCommand()
        if schema_name:
            print self.style.NOTICE("=== Running dumpdata for schema: %s" % schema_name)
            sync_tenant = get_tenant_model().objects.filter(schema_name=schema_name).get()
            # include_public so shared (public-schema) tables are also visible.
            connection.set_tenant(sync_tenant, include_public=True)
            dumpdb_command.execute(*self.app_labels, **self.options)
        else:
            public_schema_name = get_public_schema_name()
            tenant_schemas_count = get_tenant_model().objects.exclude(schema_name=public_schema_name).count()
            if not tenant_schemas_count:
                raise CommandError("No tenant schemas found")

            for tenant_schema in get_tenant_model().objects.exclude(schema_name=public_schema_name).all():
                print self.style.NOTICE("=== Running syncdb for schema %s" % tenant_schema.schema_name)
                try:
                    connection.set_tenant(tenant_schema, include_public=True)
                    dumpdb_command.execute(*self.app_labels, **self.options)
                except Exception as e:
                    # Best-effort: report and keep dumping the other schemas.
                    print e
Example #21
0
 def dumpFixture(res):
     """Dump all models (natural keys, indent=2) into allruns.json.

     A CommandError is logged rather than raised; the linked Django
     ticket is one known cause.
     """
     import os.path
     from django.core.management.commands.dumpdata import Command
     from django.core.management.base import CommandError
     try:
         (open("allruns.json","w")
          .write(Command().handle(use_natural_keys=True,
                                  indent=2)))
     except CommandError, e:
         log.msg("You might run in to https://code.djangoproject.com/ticket/16317")
         log.err(e)
Example #22
0
def serialize_to_response(app_labels=None, exclude=None, response=None,
                          format=settings.SMUGGLER_FORMAT,
                          indent=settings.SMUGGLER_INDENT):
    """Dump *app_labels* with Django's dumpdata (natural foreign/primary
    keys) and write the output to *response*, returning it.

    *response* defaults to a text/plain HttpResponse.
    """
    labels = app_labels or []
    excluded = exclude or []
    target = response or HttpResponse(content_type='text/plain')
    out_stream = StringIO()
    err_stream = StringIO()
    command = DumpData()
    command.style = no_style()
    command.execute(*labels,
                    stdout=out_stream,
                    stderr=err_stream,
                    exclude=excluded,
                    format=format,
                    indent=indent,
                    use_natural_foreign_keys=True,
                    use_natural_primary_keys=True)
    target.write(out_stream.getvalue())
    return target
    def handle(self, *args, **options):
        """Import planning units from a shapefile plus xls metadata.

        Positional args: shp path, xls path, optional full-resolution shp.
        Missing paths fall back to sample files under the project's data/
        directory; existing tables can be dumped to /tmp as JSON first.
        """
        from django.conf import settings

        try: 
            try:
                shp = args[0]
                xls = args[1]
            except IndexError:
                # No paths given: fall back to the bundled sample data.
                shp = os.path.realpath(os.path.join(os.path.dirname(__file__), 
                    '..', '..', '..', '..', 'data', 'planning_units_simple.shp'))
                xls = os.path.realpath(os.path.join(os.path.dirname(__file__), 
                    '..', '..', '..', '..', 'data', 'metrics.xls'))

            assert os.path.exists(shp)
            assert os.path.exists(xls)
            print "Using %s as the data layer" % shp
            print "Using %s as the xls metadata" % xls
        except AssertionError:
            raise CommandError("Specify shp and xls file\n \
                    python manage.py import_planning_units test.shp test.xls <optional: full res shp>")

        try:
            fullres_shp = args[2]
            assert os.path.exists(fullres_shp)
            print "Using %s as the full-res display layer" % fullres_shp
        except (AssertionError, IndexError):
            # No (valid) third argument: reuse the import layer for display.
            print "Using %s as the full-res display layer" % shp
            fullres_shp = shp

        # Hard-coded switches: no backup, always (re)import the shapefile.
        backup = False
        import_shp = True
        app = 'seak'

        modls = ['ConservationFeature',  'Cost', 'Aux', 'PuVsCf', 'PuVsCost', 'PuVsAux']
        if import_shp:
            modls.append('PlanningUnit')

        # backup old tables
        if backup:
            print "backing up old tables to /tmp/"
            from django.core.management.commands.dumpdata import Command as Dumper
            for modl in modls:
                try:
                    # Unbound call: a fresh Dumper instance is passed as self.
                    fix = Dumper.handle(Dumper(), "%s.%s" % (app, modl.lower()), format='json', indent=4)
                except CommandError, message:
                    print "# dumpdata raised a CommandError: %s" % message
                else:
                    fixname = "/tmp/%s_%s.json" % (app, modl.lower())
                    fh = open(os.path.join(fixname), "w+")
                    fh.write(fix)
                    fh.close()
Example #24
0
def serialize_to_response(app_labels=None,
                          exclude=None,
                          response=None,
                          format=settings.SMUGGLER_FORMAT,
                          indent=settings.SMUGGLER_INDENT):
    """Serialize *app_labels* via dumpdata (natural foreign/primary keys)
    into *response* (a text/plain HttpResponse by default) and return it."""
    labels = app_labels or []
    excluded = exclude or []
    target = response or HttpResponse(content_type='text/plain')
    data_buffer = StringIO()
    error_buffer = StringIO()
    command = DumpData()
    command.style = no_style()
    dump_options = {
        'stdout': data_buffer,
        'stderr': error_buffer,
        'exclude': excluded,
        'format': format,
        'indent': indent,
        'use_natural_foreign_keys': True,
        'use_natural_primary_keys': True,
    }
    command.execute(*labels, **dump_options)
    target.write(data_buffer.getvalue())
    return target
Example #25
0
#------------------------------------#

# Models whose tables will be (optionally) backed up and then cleared.
modls = ['ConservationFeature', 'Cost', 'PuVsCf', 'PuVsCost']
if import_shp:
    modls.append('PlanningUnit')

# backup old tables
if backup:
    print "backing up old tables to /tmp/"
    from django.core.management.commands.dumpdata import Command as Dumper
    from django.core.management.base import CommandError
    app = 'arp'
    for modl in modls:
        try:
            # Unbound call: a fresh Dumper instance is passed as self.
            fix = Dumper.handle(Dumper(),
                                "%s.%s" % (app, modl.lower()),
                                format='json',
                                indent=4)
        except CommandError, message:
            print "# dumpdata raised a CommandError: %s" % message
        else:
            # One JSON fixture per model, e.g. /tmp/arp_cost.json.
            fixname = "/tmp/%s_%s.json" % (app, modl.lower())
            fh = open(os.path.join(fixname), "w+")
            fh.write(fix)
            fh.close()

# Clear them out
print
print "Cleaning out old tables"
ms = [ConservationFeature, Cost, PuVsCf, PuVsCost]
if import_shp:
    ms.append(PlanningUnit)
Example #26
0
    def handle(self, *args, **options):
        """Write a gzipped tar backup containing the dumpdata JSON and,
        optionally, the MEDIA_ROOT/STATIC_ROOT trees.

        Rotates up to --backup N previous archives (outfile.N-1 .. outfile.0)
        before writing; any partial archive is deleted on error.
        """
        # PAX header recorded in the archive for provenance.
        pdict = {'origin': 'django app backup'}
        outfile = options.get('output')
        verb = int(options.get('verbosity', '1'))

        bnum = int(options.get('backup'))
        if bnum and os.path.isfile(outfile):
            # backup file names
            backups = ['%s.%d' % (outfile, n) for n in range(bnum)]
            backups.reverse()  # .2, .1, .0
            if verb > 1:
                print 'backups', backups

            # rotate
            for n in range(1, len(backups)):
                TO, FROM = backups[n - 1], backups[n]
                if verb > 1:
                    print 'rotate', FROM, TO
                if os.path.isfile(FROM):
                    # eg. Replace .1 with .0
                    if os.path.isfile(TO):
                        os.remove(TO)
                    os.rename(FROM, TO)

            # .0 should not exist
            assert not os.path.isfile(backups[-1])

            os.rename(outfile, backups[-1])

        output = tarfile.open(name=outfile,
                              mode='w:gz',
                              format=tarfile.PAX_FORMAT,
                              pax_headers=pdict)
        try:
            # Spool the dumpdata output to a temp file first so its size is
            # known before the tar member header is written.
            with tempfile.SpooledTemporaryFile(mode='w+') as dout:

                dumpcmd = DumpCommand()

                dumpcmd.stdout = dout
                dumpcmd.stderr = self.stderr
                dumpcmd.handle(*args, **options)
                if verb > 1:
                    print 'raw DB dump size', dout.tell()

                # Build the tar member header for the JSON dump by hand.
                dname = output.tarinfo('db.json')
                dname.type = tarfile.REGTYPE
                dname.size = dout.tell()
                dname.mtime = time.time()
                dname.uid = os.geteuid()
                dname.gid = os.getegid()

                dout.seek(0)
                output.addfile(dname, dout)

            if options.get('media') and os.path.isdir(settings.MEDIA_ROOT):
                if verb > 1:
                    print 'backup MEDIA_ROOT =', settings.MEDIA_ROOT
                output.add(settings.MEDIA_ROOT, 'media')

            if options.get('static') and os.path.isdir(settings.STATIC_ROOT):
                if verb > 1:
                    print 'backup STATIC_ROOT =', settings.STATIC_ROOT
                output.add(settings.STATIC_ROOT, 'static')

            if verb > 2:
                output.list()
            output.close()
        except:
            # delete partial output on error
            output.close()
            os.remove(outfile)
            raise
def migrate_fixture(fixture_path, db='fixture_migrator'):
    """ @brief: Uses South migrations in the current project to update the contents of the
            fixture at \a fixture_path.

            Steps: sync an empty database and fake-migrate it to the codebase's
            latest migrations; read the South migration history recorded in the
            fixture; migrate the database backwards to the fixture's latest
            migrations; load the fixture; migrate forward to the codebase's
            latest migrations; dump the migrated data back over \a fixture_path.
        @param fixture_path: path to a JSON fixture file (read, then rewritten in place).
        @param db: database alias used as scratch space for the migration.
        @author: Jivan
        @since: 2014-04-08
    """
    # --- Create empty database migrated to latest migrations.
    logger.info('--- Syncing Database tables to Current Models')
    from south.management.commands.syncdb import Command as SyncDBCommand
    sc = SyncDBCommand()
    sc.execute(migrate_all=True, migrate=False, database=db, interactive=False, verbosity=0)
    logger.info('--- Faking Migrations to Current Latest')
    from south.management.commands.migrate import Command as MigrateCommand
    mc = MigrateCommand()
    mc.execute(all_apps=True, fake=True, database=db, interactive=False, verbosity=0)

    # --- Get South Migration History from fixture.
    with open(fixture_path, 'r') as ff:
        fixture_contents = json.load(ff)
        # (app_name, migration) pairs recorded in the fixture's history table.
        # (The previous list-of-single-key-dicts shape was iterated with
        # .items(), which raises AttributeError on a list.)
        fixture_migrations = [
            (i['fields']['app_name'], i['fields']['migration'])
            for i in fixture_contents
            if i['model'] == 'south.migrationhistory'
        ]
    if not fixture_migrations:
        logger.info('No migration history found in fixture, guessing migrations from last commit this fixture was migrated.')
        fixture_migrations = guess_migrations_from_git_repository(fixture_path)
    # NOTE(review): the guess helper may return a mapping of app -> migration;
    # normalize to (app, migration) pairs either way — confirm against helper.
    if isinstance(fixture_migrations, dict):
        fixture_migrations = fixture_migrations.items()

    # Latest migration name per app; the u'' default sorts before every real
    # migration name, so a plain > comparison suffices.
    fixture_latest_migrations = defaultdict(unicode)
    for app, migration in fixture_migrations:
        if migration > fixture_latest_migrations[app]:
            fixture_latest_migrations[app] = migration

    # --- Migrate database to latest migrations in fixture
    logger.info('--- Migrating database backwards to latest migrations in fixture.')
    for app, latest_migration in fixture_latest_migrations.items():
        print('Migrating {} to {}'.format(app, latest_migration))
        try:
            mc.execute(app=app, target=latest_migration, database=db, interactive=False, verbosity=0)
        except ImproperlyConfigured as ex:
            # Only swallow the specific "app was removed from settings" error;
            # any other configuration problem should propagate.
            if str(ex) != 'App with label {} could not be found'.format(app):
                raise
            logger.error("Looks like app '{}' was removed from settings.  "
                         "I'll remove its entries from South's Migration history "
                         "in the new fixture.".format(app))
            MigrationHistory.objects.using(db).filter(app_name=app).delete()

    # --- Load fixture
    from django.core.management.commands.loaddata import Command as LoadDataCommand
    ldc = LoadDataCommand()
    ldc.execute(fixture_path, database=db, verbosity=1)

    # --- Migrate to latest migrations in codebase
    mc.execute(database=db, interactive=False, verbosity=1)

    # --- Dump the contents back out to fixture
    from django.core.management.commands.dumpdata import Command as DumpDataCommand
    ddc = DumpDataCommand()
    from cStringIO import StringIO
    # dumpdata writes to stdout; capture it, and restore stdout even on error.
    old_stdout = sys.stdout
    sys.stdout = mystdout = StringIO()
    try:
        ddc.execute(format='json', indent=4, database=db, exclude=[])
    finally:
        sys.stdout = old_stdout
    with open(fixture_path, 'w') as f:
        f.write(mystdout.getvalue())
    mystdout.close()
Example #28
0
 def testFixture(self):
     """This isn't actually a test.  It just takes advantage
     of the test harness to spam a bunch of messages to the
     supply app and spit out the data in a format that can
     be sucked into a fixture.
     """
     # Number of issue/receive transaction pairs to generate.
     # NOTE(review): left at 0, so nothing is generated until raised.
     transaction_count = 0

     # these are the locations that will be the origins, chosen randomly
     # from this list
     # the destinations will be chosen randomly from the origins' children
     originating_locations = [20, 2001, 2002, 2003, 2004]
     # initial stock level per originating location
     stock_levels = dict((loc, random.randint(1, 10000) * 10 + 50000)
                         for loc in originating_locations)

     # the sender will always be the same, for now
     phone = "55555"
     all_txns = []
     # probabilities that the receive message matches the issue message
     waybill_match_percent = .9
     amount_match_percent = .9
     loc_match_percent = .95
     # count in the database instead of materializing the whole queryset
     num_locs = Location.objects.count()

     # allow specifying the minimum and maximum dates for message generation
     min_date = datetime(2009, 4, 1)
     max_date = datetime(2009, 4, 30)
     min_time = time.mktime(min_date.timetuple())
     max_time = time.mktime(max_date.timetuple())

     # generate the array of dates we're going to use at the start.  This is
     # so we can order our transactions
     iss_dates = []
     for i in range(transaction_count):
         iss_dates.append(datetime.fromtimestamp(random.randint(min_time, max_time)))
     iss_dates.sort()
     rec_dates = []
     for i in range(transaction_count):
         # make the date from a min and max timestamp
         rec_dates.append(datetime.fromtimestamp(
             random.randint(
                # the min is the shipment date
                time.mktime(iss_dates[i].timetuple()),
                # the max is the shipment date + 0 to 4 days
                time.mktime((iss_dates[i] + timedelta(random.randint(0, 4))).timetuple()))))

     for i in range(transaction_count):
         # get some random data based on the parameters we've set above
         origin = Location.objects.get(code=random.choice(originating_locations))
         destination = random.choice(origin.children.all())
         waybill = random.randint(10000, 99999)
         amount = random.randint(1, 500) * 10
         diff = stock_levels[int(origin.code)] - amount
         if diff > 0:
             stock = diff
         else:
             stock = random.randint(1, 10000) * 10
         stock_levels[int(origin.code)] = stock
         issue_string = "%s@%s > llin issue from %s to %s %s %s %s" % (phone, iss_dates[i].strftime("%Y%m%d%H%M"), origin.code, destination.code, waybill, amount, stock)
         all_txns.append(issue_string)
         # create a waybill number based on the likelihood of match
         if random.random() < waybill_match_percent:
             ret_waybill = waybill
         else:
             ret_waybill = random.randint(10000, 99999)
         # create an amount based on the likelihood of match
         if random.random() < amount_match_percent:
             ret_amount = amount
         else:
             ret_amount = random.randint(1, 500) * 10
         # create an origin and destination based on the likelihood of match
         if random.random() < loc_match_percent:
             ret_orig = origin
         else:
             ret_orig = Location.objects.get(pk=random.randint(1, num_locs))
         if random.random() < loc_match_percent:
             ret_dest = destination
         else:
             ret_dest = Location.objects.get(pk=random.randint(1, num_locs))
         # dict.has_key() is deprecated (removed in Python 3); use `in`
         if int(ret_dest.code) in stock_levels:
             ret_stock = stock_levels[int(ret_dest.code)] + amount
         else:
             # make sure the stock at the receiver is higher than the amount of the bill
             ret_stock = random.randint(1, 2000) * 10 + ret_amount
         stock_levels[int(ret_dest.code)] = ret_stock
         # make sure the date received is after the date sent
         receive_string = "%s@%s > llin receive from %s to %s %s %s %s" % (phone, rec_dates[i].strftime("%Y%m%d%H%M"), ret_orig.code, ret_dest.code, ret_waybill, ret_amount, ret_stock)
         all_txns.append(receive_string)

     script = "\n".join(all_txns)
     self.runScript(script)
     dumpdata = Command()
     filename = os.path.abspath(os.path.join(os.path.dirname(__file__), "fixtures/test_transactions_stock.json"))
     options = {"indent": 2}
     # NOTE(review): the dump is returned but never written to `filename`;
     # presumably it is captured manually when building the fixture — confirm.
     datadump = dumpdata.handle("supply", **options)
Example #29
0
 def testFixture(self):
     """This isn't actually a test.  It just takes advantage
     of the test harness to spam a bunch of messages to the
     supply app and spit out the data in a format that can
     be sucked into a fixture.
     """
     # Number of issue/receive transaction pairs to generate.
     # NOTE(review): left at 0, so nothing is generated until raised.
     transaction_count = 0

     # these are the locations that will be the origins, chosen randomly
     # from this list
     # the destinations will be chosen randomly from the origins' children
     originating_locations = [20, 2001, 2002, 2003, 2004]
     # initial stock level per originating location
     stock_levels = dict((loc, random.randint(1, 10000) * 10 + 50000)
                         for loc in originating_locations)

     # the sender will always be the same, for now
     phone = "55555"
     all_txns = []
     # probabilities that the receive message matches the issue message
     waybill_match_percent = .9
     amount_match_percent = .9
     loc_match_percent = .95
     # count in the database instead of materializing the whole queryset
     num_locs = Location.objects.count()

     # allow specifying the minimum and maximum dates for message generation
     min_date = datetime(2009, 4, 1)
     max_date = datetime(2009, 4, 30)
     min_time = time.mktime(min_date.timetuple())
     max_time = time.mktime(max_date.timetuple())

     # generate the array of dates we're going to use at the start.  This is
     # so we can order our transactions
     iss_dates = []
     for i in range(transaction_count):
         iss_dates.append(datetime.fromtimestamp(random.randint(min_time, max_time)))
     iss_dates.sort()
     rec_dates = []
     for i in range(transaction_count):
         # make the date from a min and max timestamp
         rec_dates.append(datetime.fromtimestamp(
             random.randint(
                # the min is the shipment date
                time.mktime(iss_dates[i].timetuple()),
                # the max is the shipment date + 0 to 4 days
                time.mktime((iss_dates[i] + timedelta(random.randint(0, 4))).timetuple()))))

     for i in range(transaction_count):
         # get some random data based on the parameters we've set above
         origin = Location.objects.get(code=random.choice(originating_locations))
         destination = random.choice(origin.children.all())
         waybill = random.randint(10000, 99999)
         amount = random.randint(1, 500) * 10
         diff = stock_levels[int(origin.code)] - amount
         if diff > 0:
             stock = diff
         else:
             stock = random.randint(1, 10000) * 10
         stock_levels[int(origin.code)] = stock
         issue_string = "%s@%s > llin issue from %s to %s %s %s %s" % (phone, iss_dates[i].strftime("%Y%m%d%H%M"), origin.code, destination.code, waybill, amount, stock)
         all_txns.append(issue_string)
         # create a waybill number based on the likelihood of match
         if random.random() < waybill_match_percent:
             ret_waybill = waybill
         else:
             ret_waybill = random.randint(10000, 99999)
         # create an amount based on the likelihood of match
         if random.random() < amount_match_percent:
             ret_amount = amount
         else:
             ret_amount = random.randint(1, 500) * 10
         # create an origin and destination based on the likelihood of match
         if random.random() < loc_match_percent:
             ret_orig = origin
         else:
             ret_orig = Location.objects.get(pk=random.randint(1, num_locs))
         if random.random() < loc_match_percent:
             ret_dest = destination
         else:
             ret_dest = Location.objects.get(pk=random.randint(1, num_locs))
         # dict.has_key() is deprecated (removed in Python 3); use `in`
         if int(ret_dest.code) in stock_levels:
             ret_stock = stock_levels[int(ret_dest.code)] + amount
         else:
             # make sure the stock at the receiver is higher than the amount of the bill
             ret_stock = random.randint(1, 2000) * 10 + ret_amount
         stock_levels[int(ret_dest.code)] = ret_stock
         # make sure the date received is after the date sent
         receive_string = "%s@%s > llin receive from %s to %s %s %s %s" % (phone, rec_dates[i].strftime("%Y%m%d%H%M"), ret_orig.code, ret_dest.code, ret_waybill, ret_amount, ret_stock)
         all_txns.append(receive_string)

     script = "\n".join(all_txns)
     self.runScript(script)
     dumpdata = Command()
     filename = os.path.abspath(os.path.join(os.path.dirname(__file__), "fixtures/test_transactions_stock.json"))
     options = {"indent": 2}
     # NOTE(review): the dump is returned but never written to `filename`;
     # presumably it is captured manually when building the fixture — confirm.
     datadump = dumpdata.handle("supply", **options)
#------------------------------------#

modls = ['ConservationFeature',  'Cost', 'PuVsCf', 'PuVsCost']
if import_shp:
    modls.append('PlanningUnit')

# backup old tables
if backup:
    print "backing up old tables to /tmp/"
    from django.core.management.commands.dumpdata import Command as Dumper
    from django.core.management.base import CommandError
    app = 'arp'
    for modl in modls:
        try:
            fix = Dumper.handle(Dumper(), "%s.%s" % (app, modl.lower()), format='json', indent=4)
        except CommandError, message:
            print "# dumpdata raised a CommandError: %s" % message
        else:
            fixname = "/tmp/%s_%s.json" % (app, modl.lower())
            fh = open(os.path.join(fixname), "w+")
            fh.write(fix)
            fh.close()

# Clear them out
print
print "Cleaning out old tables"
ms = [ConservationFeature, Cost, PuVsCf, PuVsCost]
if import_shp:
    ms.append(PlanningUnit)
for m in ms: