Example #1
 def __exit__(self, exc_type, exc_value, traceback):
     if exc_type is None:
         return
     for dj_exc_type in (
             DataError,
             OperationalError,
             IntegrityError,
             InternalError,
             ProgrammingError,
             NotSupportedError,
             DatabaseError,
             InterfaceError,
             Error,
     ):
         db_exc_type = getattr(self.wrapper.Database, dj_exc_type.__name__)
         if issubclass(exc_type, db_exc_type):
             dj_exc_value = dj_exc_type(*exc_value.args)
             dj_exc_value.__cause__ = exc_value
             if not hasattr(exc_value, '__traceback__'):
                 exc_value.__traceback__ = traceback
             # Only set the 'errors_occurred' flag for errors that may make
             # the connection unusable.
             if dj_exc_type not in (DataError, IntegrityError):
                 self.wrapper.errors_occurred = True
             six.reraise(dj_exc_type, dj_exc_value, traceback)
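For comparison, here is a minimal Python 3-only sketch of what this six.reraise call amounts to (illustrative only; the code above keeps six for Python 2 compatibility):

# Python 3 equivalent of the translation step above (illustrative sketch).
def translate_exception(exc_type, exc_value, traceback, dj_exc_type):
    dj_exc_value = dj_exc_type(*exc_value.args)
    # 'raise ... from ...' sets __cause__; with_traceback() attaches the
    # original traceback, which is what six.reraise emulates here.
    raise dj_exc_value.with_traceback(traceback) from exc_value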
Example #2
def parse_http_date(date):
    """
    Parses a date format as specified by HTTP RFC7231 section 7.1.1.1.

    The three formats allowed by the RFC are accepted, even if only the first
    one is still in widespread use.

    Returns an integer expressed in seconds since the epoch, in UTC.
    """
    # email.utils.parsedate does the job for RFC1123 dates; unfortunately
    # RFC7231 makes it mandatory to support RFC850 dates too. So we roll
    # our own RFC-compliant parsing.
    for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE:
        m = regex.match(date)
        if m is not None:
            break
    else:
        raise ValueError("%r is not in a valid HTTP date format" % date)
    try:
        year = int(m.group('year'))
        if year < 100:
            if year < 70:
                year += 2000
            else:
                year += 1900
        month = MONTHS.index(m.group('mon').lower()) + 1
        day = int(m.group('day'))
        hour = int(m.group('hour'))
        min = int(m.group('min'))
        sec = int(m.group('sec'))
        result = datetime.datetime(year, month, day, hour, min, sec)
        return calendar.timegm(result.utctimetuple())
    except Exception:
        six.reraise(ValueError, ValueError("%r is not a valid date" % date),
                    sys.exc_info()[2])
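A usage sketch, assuming the function above is django.utils.http.parse_http_date; the sample date comes from RFC 7231:

# Illustrative usage; all three accepted formats map to the same timestamp.
from django.utils.http import parse_http_date

parse_http_date('Sun, 06 Nov 1994 08:49:37 GMT')   # RFC 1123 -> 784111777
parse_http_date('Sunday, 06-Nov-94 08:49:37 GMT')  # RFC 850  -> 784111777
parse_http_date('Sun Nov  6 08:49:37 1994')        # asctime  -> 784111777
# parse_http_date('yesterday') would raise ValueError.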
Example #3
def popen_wrapper(args, os_err_exc_type=CommandError, stdout_encoding='utf-8'):
    """
    Friendly wrapper around Popen.

    Returns stdout output, stderr output and OS status code.
    """
    try:
        p = Popen(args,
                  shell=False,
                  stdout=PIPE,
                  stderr=PIPE,
                  close_fds=os.name != 'nt')
    except OSError as e:
        strerror = force_text(e.strerror,
                              DEFAULT_LOCALE_ENCODING,
                              strings_only=True)
        six.reraise(
            os_err_exc_type,
            os_err_exc_type('Error executing %s: %s' % (args[0], strerror)),
            sys.exc_info()[2])
    output, errors = p.communicate()
    return (force_text(output,
                       stdout_encoding,
                       strings_only=True,
                       errors='strict'),
            force_text(errors,
                       DEFAULT_LOCALE_ENCODING,
                       strings_only=True,
                       errors='replace'), p.returncode)
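A minimal usage sketch, assuming this is django.core.management.utils.popen_wrapper; the command shown is only an example:

# Illustrative usage; 'git' is an arbitrary example command.
from django.core.management.utils import popen_wrapper

out, err, status = popen_wrapper(['git', '--version'])
if status != 0:
    print('command failed: %s' % err)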
Example #4
File: flush.py Project: thektulu/arouse
    def handle(self, **options):
        database = options['database']
        connection = connections[database]
        verbosity = options['verbosity']
        interactive = options['interactive']
        # The following are stealth options used by Django's internals.
        reset_sequences = options.get('reset_sequences', True)
        allow_cascade = options.get('allow_cascade', False)
        inhibit_post_migrate = options.get('inhibit_post_migrate', False)

        self.style = no_style()

        # Import the 'management' module within each installed app, to register
        # dispatcher events.
        for app_config in apps.get_app_configs():
            try:
                import_module('.management', app_config.name)
            except ImportError:
                pass

        sql_list = sql_flush(self.style, connection, only_django=True,
                             reset_sequences=reset_sequences,
                             allow_cascade=allow_cascade)

        if interactive:
            confirm = input("""You have requested a flush of the database.
This will IRREVERSIBLY DESTROY all data currently in the %r database,
and return each table to an empty state.
Are you sure you want to do this?

    Type 'yes' to continue, or 'no' to cancel: """ % connection.settings_dict['NAME'])
        else:
            confirm = 'yes'

        if confirm == 'yes':
            try:
                with transaction.atomic(using=database,
                                        savepoint=connection.features.can_rollback_ddl):
                    with connection.cursor() as cursor:
                        for sql in sql_list:
                            cursor.execute(sql)
            except Exception as e:
                new_msg = (
                    "Database %s couldn't be flushed. Possible reasons:\n"
                    "  * The database isn't running or isn't configured correctly.\n"
                    "  * At least one of the expected database tables doesn't exist.\n"
                    "  * The SQL was invalid.\n"
                    "Hint: Look at the output of 'django-admin sqlflush'. "
                    "That's the SQL this command wasn't able to run.\n"
                    "The full error: %s") % (connection.settings_dict['NAME'], e)
                six.reraise(CommandError, CommandError(new_msg), sys.exc_info()[2])

            # Empty sql_list may signify an empty database and post_migrate would then crash
            if sql_list and not inhibit_post_migrate:
                # Emit the post migrate signal. This allows individual applications to
                # respond as if the database had been migrated from scratch.
                emit_post_migrate_signal(verbosity, interactive, database)
        else:
            self.stdout.write("Flush cancelled.\n")
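This handler is normally reached through Django's management interface; a minimal invocation sketch, assuming a configured project:

# Illustrative invocation (assumes DJANGO_SETTINGS_MODULE is configured).
from django.core.management import call_command

call_command('flush', interactive=False, verbosity=0)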
Example #5
File: base.py Project: thektulu/arouse
 def executemany(self, query, args):
     try:
         return self.cursor.executemany(query, args)
     except Database.OperationalError as e:
         # Map some error codes to IntegrityError, since they seem to be
         # misclassified and Django would prefer the more logical place.
         if e.args[0] in self.codes_for_integrityerror:
             six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
         raise
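The codes_for_integrityerror attribute referenced here is a tuple of MySQL error codes defined on the wrapper class; a hedged sketch of what such a definition looks like (the exact set of codes is backend- and version-specific):

# Hedged sketch: error codes the MySQL backend reports as IntegrityError.
# 1048 ("Column cannot be null") is one such code; treat the set as version-specific.
codes_for_integrityerror = (1048,)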
Example #6
File: base.py Project: thektulu/arouse
 def execute(self, query, params=None):
     query, params = self._fix_for_params(query, params)
     self._guess_input_sizes([params])
     try:
         return self.cursor.execute(query, self._param_generator(params))
     except Database.DatabaseError as e:
         # cx_Oracle <= 4.4.0 wrongly raises a DatabaseError for ORA-01400.
         if hasattr(e.args[0], 'code') and e.args[0].code == 1400 and not isinstance(e, IntegrityError):
             six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
         raise
Example #7
 def _isdst(self, dt):
     try:
         return super(LocalTimezone, self)._isdst(dt)
     except (OverflowError, ValueError) as exc:
         exc_type = type(exc)
         exc_value = exc_type(
             "Unsupported value: %r. You should install pytz." % dt)
         exc_value.__cause__ = exc
         if not hasattr(exc, '__traceback__'):
             exc.__traceback__ = sys.exc_info()[2]
         six.reraise(exc_type, exc_value, sys.exc_info()[2])
Example #8
File: base.py Project: thektulu/arouse
 def _commit(self):
     if self.connection is not None:
         try:
             return self.connection.commit()
         except Database.DatabaseError as e:
             # cx_Oracle 5.0.4 raises a cx_Oracle.DatabaseError exception
             # with the following attributes and values:
             #  code = 2091
             #  message = 'ORA-02091: transaction rolled back
             #            'ORA-02291: integrity constraint (TEST_DJANGOTEST.SYS
             #               _C00102056) violated - parent key not found'
             # We convert that particular case to our IntegrityError exception
             x = e.args[0]
             if hasattr(x, 'code') and hasattr(x, 'message') \
                and x.code == 2091 and 'ORA-02291' in x.message:
                 six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
             raise
Example #9
File: json.py Project: thektulu/arouse
def Deserializer(stream_or_string, **options):
    """
    Deserialize a stream or string of JSON data.
    """
    if not isinstance(stream_or_string, (bytes, six.string_types)):
        stream_or_string = stream_or_string.read()
    if isinstance(stream_or_string, bytes):
        stream_or_string = stream_or_string.decode('utf-8')
    try:
        objects = json.loads(stream_or_string)
        for obj in PythonDeserializer(objects, **options):
            yield obj
    except GeneratorExit:
        raise
    except Exception as e:
        # Map to deserializer error
        six.reraise(DeserializationError, DeserializationError(e),
                    sys.exc_info()[2])
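This generator backs the public serializer API; an illustrative call, assuming a configured project with django.contrib.auth installed:

# Illustrative usage via django.core.serializers, which dispatches to the
# Deserializer above for the 'json' format.
from django.core import serializers

data = '[{"model": "auth.group", "pk": 1, "fields": {"name": "editors"}}]'
for deserialized in serializers.deserialize('json', data):
    deserialized.save()  # each item wraps a model instance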
Example #10
File: base.py Project: thektulu/arouse
 def executemany(self, query, params=None):
     if not params:
         # No params given, nothing to do
         return None
     # uniform treatment for sequences and iterables
     params_iter = iter(params)
     query, firstparams = self._fix_for_params(query, next(params_iter))
     # we build a list of formatted params; as we're going to traverse it
     # more than once, we can't make it lazy by using a generator
     formatted = [firstparams] + [self._format_params(p) for p in params_iter]
     self._guess_input_sizes(formatted)
     try:
         return self.cursor.executemany(query, [self._param_generator(p) for p in formatted])
     except Database.DatabaseError as e:
         # cx_Oracle <= 4.4.0 wrongly raises a DatabaseError for ORA-01400.
         if hasattr(e.args[0], 'code') and e.args[0].code == 1400 and not isinstance(e, IntegrityError):
             six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
         raise
Example #11
def Deserializer(stream_or_string, **options):
    """
    Deserialize a stream or string of YAML data.
    """
    if isinstance(stream_or_string, bytes):
        stream_or_string = stream_or_string.decode('utf-8')
    if isinstance(stream_or_string, six.string_types):
        stream = StringIO(stream_or_string)
    else:
        stream = stream_or_string
    try:
        for obj in PythonDeserializer(yaml.load(stream, Loader=SafeLoader), **options):
            yield obj
    except GeneratorExit:
        raise
    except Exception as e:
        # Map to deserializer error
        six.reraise(DeserializationError, DeserializationError(e), sys.exc_info()[2])
Example #12
def import_string(dotted_path):
    """
    Import a dotted module path and return the attribute/class designated by the
    last name in the path. Raise ImportError if the import failed.
    """
    try:
        module_path, class_name = dotted_path.rsplit('.', 1)
    except ValueError:
        msg = "%s doesn't look like a module path" % dotted_path
        six.reraise(ImportError, ImportError(msg), sys.exc_info()[2])

    module = import_module(module_path)

    try:
        return getattr(module, class_name)
    except AttributeError:
        msg = 'Module "%s" does not define a "%s" attribute/class' % (
            module_path, class_name)
        six.reraise(ImportError, ImportError(msg), sys.exc_info()[2])
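A quick usage sketch, assuming this is django.utils.module_loading.import_string:

# Illustrative usage of import_string.
from django.utils.module_loading import import_string

OrderedDict = import_string('collections.OrderedDict')   # returns the class
# import_string('collections')            -> ImportError: doesn't look like a module path
# import_string('collections.NoSuchName') -> ImportError: attribute not defined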
Example #13
 def remove_replacement_node(self, replacement, replaced):
     """
     The inverse operation to `remove_replaced_nodes`. Almost. Removes the
     replacement node `replacement` and remaps its child nodes to
     `replaced` - the list of nodes it would have replaced. Its parent
     nodes are not remapped as they are expected to be correct already.
     """
     self.nodes.pop(replacement, None)
     try:
         replacement_node = self.node_map.pop(replacement)
     except KeyError as exc:
         exc_value = NodeNotFoundError(
             "Unable to remove replacement node %r. It was either never added"
             " to the migration graph, or has been removed already." %
             (replacement, ), replacement)
         exc_value.__cause__ = exc
         if not hasattr(exc, '__traceback__'):
             exc.__traceback__ = sys.exc_info()[2]
         six.reraise(NodeNotFoundError, exc_value, sys.exc_info()[2])
     replaced_nodes = set()
     replaced_nodes_parents = set()
     for key in replaced:
         replaced_node = self.node_map.get(key)
         if replaced_node:
             replaced_nodes.add(replaced_node)
             replaced_nodes_parents |= replaced_node.parents
     # We're only interested in the latest replaced node, so filter out
     # replaced nodes that are parents of other replaced nodes.
     replaced_nodes -= replaced_nodes_parents
     for child in replacement_node.children:
         child.parents.remove(replacement_node)
         for replaced_node in replaced_nodes:
             replaced_node.add_child(child)
             child.add_parent(replaced_node)
     for parent in replacement_node.parents:
         parent.children.remove(replacement_node)
         # NOTE: There is no need to remap parent dependencies as we can
         # assume the replaced nodes already have the correct ancestry.
     self.clear_cache()
Example #14
 def remove_replaced_nodes(self, replacement, replaced):
     """
     Removes each of the `replaced` nodes (when they exist). Any
     dependencies that were referencing them are changed to reference the
     `replacement` node instead.
     """
     # Cast list of replaced keys to set to speed up lookup later.
     replaced = set(replaced)
     try:
         replacement_node = self.node_map[replacement]
     except KeyError as exc:
         exc_value = NodeNotFoundError(
             "Unable to find replacement node %r. It was either never added"
             " to the migration graph, or has been removed." %
             (replacement, ), replacement)
         exc_value.__cause__ = exc
         if not hasattr(exc, '__traceback__'):
             exc.__traceback__ = sys.exc_info()[2]
         six.reraise(NodeNotFoundError, exc_value, sys.exc_info()[2])
     for replaced_key in replaced:
         self.nodes.pop(replaced_key, None)
         replaced_node = self.node_map.pop(replaced_key, None)
         if replaced_node:
             for child in replaced_node.children:
                 child.parents.remove(replaced_node)
                 # We don't want to create dependencies between the replaced
                 # node and the replacement node as this would lead to
                 # self-referencing on the replacement node at a later iteration.
                 if child.key not in replaced:
                     replacement_node.add_child(child)
                     child.add_parent(replacement_node)
             for parent in replaced_node.parents:
                 parent.children.remove(replaced_node)
                 # Again, to avoid self-referencing.
                 if parent.key not in replaced:
                     replacement_node.add_parent(parent)
                     parent.add_child(replacement_node)
     self.clear_cache()
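A hedged sketch of exercising this method on a tiny graph; as in Django's own tests, the migration objects are stubbed (here with None), so this only illustrates the node bookkeeping:

# Hedged sketch: replace two plain migrations with one squashed migration.
from django.db.migrations.graph import MigrationGraph

graph = MigrationGraph()
graph.add_node(('app', '0001_initial'), None)
graph.add_node(('app', '0002_change'), None)
graph.add_node(('app', '0001_squashed_0002'), None)
graph.add_dependency('app.0002_change', ('app', '0002_change'), ('app', '0001_initial'))
graph.remove_replaced_nodes(
    ('app', '0001_squashed_0002'),
    [('app', '0001_initial'), ('app', '0002_change')],
)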
Example #15
def raise_last_exception():
    global _exception
    if _exception is not None:
        six.reraise(*_exception)
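The module-level _exception read here is populated elsewhere in the autoreloader; a hedged sketch of the general pattern:

# Hedged sketch: a wrapper records sys.exc_info() from a failed call so the
# reloader can re-raise it later via raise_last_exception().
import sys

_exception = None

def record_exception(func, *args, **kwargs):
    global _exception
    try:
        return func(*args, **kwargs)
    except Exception:
        _exception = sys.exc_info()
        raise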
Example #16
File: loader.py Project: thektulu/arouse
 def build_graph(self):
     """
     Builds a migration dependency graph using both the disk and database.
     You'll need to rebuild the graph if you apply migrations. This isn't
     usually a problem as generally migration stuff runs in a one-shot process.
     """
     # Load disk data
     self.load_disk()
     # Load database data
     if self.connection is None:
         self.applied_migrations = set()
     else:
         recorder = MigrationRecorder(self.connection)
         self.applied_migrations = recorder.applied_migrations()
     # To start, populate the migration graph with nodes for ALL migrations
     # and their dependencies. Also make note of replacing migrations at this step.
     self.graph = MigrationGraph()
     self.replacements = {}
     for key, migration in self.disk_migrations.items():
         self.graph.add_node(key, migration)
         # Internal (aka same-app) dependencies.
         self.add_internal_dependencies(key, migration)
         # Replacing migrations.
         if migration.replaces:
             self.replacements[key] = migration
     # Add external dependencies now that the internal ones have been resolved.
     for key, migration in self.disk_migrations.items():
         self.add_external_dependencies(key, migration)
     # Carry out replacements where possible.
     for key, migration in self.replacements.items():
         # Get applied status of each of this migration's replacement targets.
         applied_statuses = [(target in self.applied_migrations) for target in migration.replaces]
         # Ensure the replacing migration is only marked as applied if all of
         # its replacement targets are.
         if all(applied_statuses):
             self.applied_migrations.add(key)
         else:
             self.applied_migrations.discard(key)
         # A replacing migration can be used if either all or none of its
         # replacement targets have been applied.
         if all(applied_statuses) or (not any(applied_statuses)):
             self.graph.remove_replaced_nodes(key, migration.replaces)
         else:
             # This replacing migration cannot be used because it is partially applied.
             # Remove it from the graph and remap dependencies to it (#25945).
             self.graph.remove_replacement_node(key, migration.replaces)
     # Ensure the graph is consistent.
     try:
         self.graph.validate_consistency()
     except NodeNotFoundError as exc:
         # Check if the missing node could have been replaced by any squash
         # migration but wasn't because the squash migration was partially
         # applied before. In that case raise a more understandable exception
         # (#23556).
         # Get reverse replacements.
         reverse_replacements = {}
         for key, migration in self.replacements.items():
             for replaced in migration.replaces:
                 reverse_replacements.setdefault(replaced, set()).add(key)
         # Try to reraise exception with more detail.
         if exc.node in reverse_replacements:
             candidates = reverse_replacements.get(exc.node, set())
             is_replaced = any(candidate in self.graph.nodes for candidate in candidates)
             if not is_replaced:
                 tries = ', '.join('%s.%s' % c for c in candidates)
                 exc_value = NodeNotFoundError(
                     "Migration {0} depends on nonexistent node ('{1}', '{2}'). "
                     "Django tried to replace migration {1}.{2} with any of [{3}] "
                     "but wasn't able to because some of the replaced migrations "
                     "are already applied.".format(
                         exc.origin, exc.node[0], exc.node[1], tries
                     ),
                     exc.node
                 )
                 exc_value.__cause__ = exc
                 if not hasattr(exc, '__traceback__'):
                     exc.__traceback__ = sys.exc_info()[2]
                 six.reraise(NodeNotFoundError, exc_value, sys.exc_info()[2])
         raise exc
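An illustrative way to reach this code path, assuming a configured project:

# Illustrative usage (build_graph() runs when the loader is constructed).
from django.db import connection
from django.db.migrations.loader import MigrationLoader

loader = MigrationLoader(connection)
latest = loader.graph.leaf_nodes()   # latest migration node per app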