def get_db_state(disposition=DISPOSITION_RO):
    impl = config.get_implementation()
    if impl is None:
        impl = sys.modules[__name__]

    db_filename = config.get_db_filename(impl=impl)
    lastblock_filename = config.get_lastblock_filename()

    firstcheck = True
    for path in [db_filename, lastblock_filename]:
        if os.path.exists(path):
            # have already created the db
            firstcheck = False
    if not firstcheck and not os.path.exists(lastblock_filename):
        log.error("FATAL: no such file or directory: %s" % lastblock_filename)
        os.abort()

    # verify that it is well-formed if it exists
    elif os.path.exists(lastblock_filename):
        try:
            with open(lastblock_filename, "r") as f:
                int(f.read().strip())
        except Exception as e:
            log.error("FATAL: failed to parse: %s" % lastblock_filename)
            log.exception(e)
            os.abort()
Example #2
    def get(self, *args, **kwargs):
        """ GET request """

        jsonlist = []

        # get the logs (but only the public ones)
        con = sqlite3.connect(get_db_filename(),
                              detect_types=sqlite3.PARSE_DECLTYPES)
        cur = con.cursor()

        # get vehicle name information from vehicle table
        cur.execute('select UUID, Name from Vehicle')
        db_tuples = cur.fetchall()
        vehicle_table = {db_tuple[0]: db_tuple[1] for db_tuple in db_tuples}

        cur.execute(
            'select Id, Date, Description, WindSpeed, Rating, VideoUrl, ErrorLabels, '
            'Source, Feedback, Type from Logs where Public = 1')
        # need to fetch all here, because we will do more SQL calls while
        # iterating (having multiple cursors does not seem to work)
        db_tuples = cur.fetchall()
        for db_tuple in db_tuples:
            jsondict = {}
            db_data = DBData()
            log_id = db_tuple[0]
            jsondict['log_id'] = log_id
            jsondict['log_date'] = db_tuple[1].strftime('%Y-%m-%d')
            db_data.description = db_tuple[2]
            db_data.wind_speed = db_tuple[3]
            db_data.rating = db_tuple[4]
            db_data.video_url = db_tuple[5]
            db_data.error_labels = sorted([int(x) for x in db_tuple[6].split(',') if len(x) > 0]) \
                if db_tuple[6] else []
            db_data.source = db_tuple[7]
            db_data.feedback = db_tuple[8]
            db_data.type = db_tuple[9]
            jsondict.update(db_data.to_json_dict())

            db_data_gen = get_generated_db_data_from_log(log_id, con, cur)
            if db_data_gen is None:
                continue

            jsondict.update(db_data_gen.to_json_dict())
            # add vehicle name
            jsondict['vehicle_name'] = vehicle_table.get(
                jsondict['vehicle_uuid'], '')
            airframe_data = get_airframe_data(jsondict['sys_autostart_id'])
            jsondict['airframe_name'] = airframe_data.get('name', '') \
                if airframe_data is not None else ''
            jsondict['airframe_type'] = airframe_data.get('type', jsondict['sys_autostart_id']) \
                if airframe_data is not None else jsondict['sys_autostart_id']

            jsonlist.append(jsondict)

        cur.close()
        con.close()

        self.set_header('Content-Type', 'application/json')
        self.write(json.dumps(jsonlist))
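The handler above addresses each row positionally (db_tuple[0], db_tuple[1], ...), which is easy to break when the SELECT column list changes. As a side note, sqlite3 can return name-addressable rows instead; a minimal standalone sketch, using a throwaway in-memory table:

import sqlite3

con = sqlite3.connect(':memory:')
con.row_factory = sqlite3.Row  # rows now support access by column name
cur = con.cursor()
cur.execute("select 'abc' as Id, 'test flight' as Description")
row = cur.fetchone()
print(row['Id'], row['Description'])  # prints: abc test flight
con.close()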
Example #3
    def save( self, block_id, consensus_hash, pending_ops, backup=False ):
        """
        Write out all state to the working directory.
        Calls the implementation's 'db_save' method.
        
        Return True on success 
        Return False on error
        Raise an exception if block_id represents a block we've already processed.
        """
        
        if block_id < self.lastblock:
           raise Exception("Already processed up to block %s (got %s)" % (self.lastblock, block_id))
        
        # stage data to temporary files
        tmp_db_filename = (config.get_db_filename() + ".tmp")
        tmp_snapshot_filename = (config.get_snapshots_filename() + ".tmp")
        tmp_lastblock_filename = (config.get_lastblock_filename() + ".tmp")
        
        with open(tmp_snapshot_filename, 'w') as f:
            db_dict = {
               'snapshots': self.consensus_hashes
            }
            f.write(json.dumps(db_dict))
            f.flush()
        
        # put this last...
        with open(tmp_lastblock_filename, "w") as lastblock_f:
            lastblock_f.write("%s" % block_id)
            lastblock_f.flush()

        rc = self.impl.db_save( block_id, consensus_hash, pending_ops, tmp_db_filename, db_state=self.state )
        if not rc:
            # failed to save 
            log.error("Implementation failed to save at block %s to %s" % (block_id, tmp_db_filename))
            
            try:
                os.unlink( tmp_lastblock_filename )
            except:
                pass 
            
            try:
                os.unlink( tmp_snapshot_filename )
            except:
                pass 
            
            return False
       
        rc = self.commit( backup=backup )
        if not rc:
            log.error("Failed to commit data at block %s.  Rolling back." % block_id )
            
            self.rollback()
            return False 
        
        else:
            self.lastblock = block_id
            return True
    def commit(self, backup=False, startup=False):
        """
        Move all written but uncommitted data into place.
        Return True on success
        Return False on error (in which case the caller should rollback())

        It is safe to call this method repeatedly until it returns True.
        """

        if self.read_only:
            log.error("FATAL: read-only")
            os.abort()

        tmp_db_filename = config.get_db_filename(impl=self.impl) + ".tmp"
        tmp_snapshot_filename = config.get_snapshots_filename(impl=self.impl) + ".tmp"
        tmp_lastblock_filename = config.get_lastblock_filename(impl=self.impl) + ".tmp"

        if not os.path.exists(tmp_lastblock_filename) and (
            os.path.exists(tmp_db_filename) or os.path.exists(tmp_snapshot_filename)):
            # we did not successfully stage the write.
            # rollback
            log.error("Partial write detected.  Not committing.")
            return False

        # basic sanity checks: don't overwrite the db if the file is zero bytes, or if we can't load it
        if os.path.exists(tmp_db_filename):
            db_dir = os.path.dirname(tmp_db_filename)

            try:
                dirfd = os.open(db_dir, os.O_DIRECTORY)
                os.fsync(dirfd)
                os.close(dirfd)
            except Exception as e:
                log.exception(e)
                log.error("FATAL: failed to sync directory %s" % db_dir)
                traceback.print_stack()
                os.abort()

            sb = os.stat(tmp_db_filename)
            if sb.st_size == 0:
                log.error("Partial write detected: tried to overwrite with zero-sized db!  Will rollback.")
                return False

            if startup:
                # make sure we can load this
                try:
                    with open(tmp_snapshot_filename, "r") as f:
                        db_txt = f.read()

                    db_json = json.loads(db_txt)
                except:
                    log.error("Partial write detected: corrupt partially-committed db!  Will rollback.")
                    return False
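The save()/commit() pair above implements the classic crash-safe write pattern: stage everything to ".tmp" files, flush and fsync, then atomically move the staged files into place. A minimal standalone sketch of the same idea (not part of the virtualchain API; the function name is illustrative):

import json
import os
import tempfile

def atomic_write_json(path, obj):
    """Write obj as JSON so that a crash leaves either the old or the new file."""
    dirname = os.path.dirname(os.path.abspath(path))
    fd, tmp_path = tempfile.mkstemp(dir=dirname, suffix=".tmp")
    try:
        with os.fdopen(fd, "w") as f:
            f.write(json.dumps(obj))
            f.flush()
            os.fsync(f.fileno())   # make sure the data hits disk before the rename
        os.rename(tmp_path, path)  # atomic on POSIX filesystems
    except Exception:
        try:
            os.unlink(tmp_path)    # roll back the staged file
        except OSError:
            pass
        raise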
Example #5
    def get_backup_paths( cls, block_id, impl, working_dir=None ):
        """
        Get the set of backup paths, given the virtualchain implementation module and block number
        """
        backup_dir = os.path.join( config.get_working_dir(impl=impl, working_dir=working_dir), "backups" )
        backup_paths = []
        for p in [config.get_db_filename(impl=impl, working_dir=working_dir),
                  config.get_snapshots_filename(impl=impl, working_dir=working_dir),
                  config.get_lastblock_filename(impl=impl, working_dir=working_dir)]:
            pbase = os.path.basename(p)
            backup_path = os.path.join( backup_dir, pbase + (".bak.%s" % block_id))
            backup_paths.append( backup_path )

        return backup_paths
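Condensed into a pure function, the naming scheme is easy to see; the working directory and file names below are illustrative, not taken from the snippet:

import os

def backup_paths_for(working_dir, filenames, block_id):
    # mirrors the scheme above: <working_dir>/backups/<basename>.bak.<block_id>
    backup_dir = os.path.join(working_dir, "backups")
    return [os.path.join(backup_dir, os.path.basename(p) + ".bak.%s" % block_id)
            for p in filenames]

print(backup_paths_for('/var/vchain', ['db', 'snapshots', 'lastblock'], 105000))
# ['/var/vchain/backups/db.bak.105000', '/var/vchain/backups/snapshots.bak.105000',
#  '/var/vchain/backups/lastblock.bak.105000']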
Example #6
    def get_backup_paths( cls, block_id, impl ):
        """
        Get the set of backup paths, given the virtualchain implementation module and block number
        """
        backup_dir = os.path.join( config.get_working_dir(impl=impl), "backups" )
        backup_paths = []
        for p in [config.get_db_filename(impl=impl),
                  config.get_snapshots_filename(impl=impl),
                  config.get_lastblock_filename(impl=impl)]:
            pbase = os.path.basename(p)
            backup_path = os.path.join( backup_dir, pbase + (".bak.%s" % block_id))
            backup_paths.append( backup_path )

        return backup_paths
Example #7
    def commit( self, backup=False, startup=False ):
        """
        Move all written but uncommitted data into place.
        Return True on success 
        Return False on error (in which case the caller should rollback())
        
        It is safe to call this method repeatedly until it returns True.
        """

        if self.read_only:
           log.error("FATAL: read-only")
           os.abort()

        tmp_db_filename = config.get_db_filename(impl=self.impl) + ".tmp"
        tmp_snapshot_filename = config.get_snapshots_filename(impl=self.impl) + ".tmp"
        tmp_lastblock_filename = config.get_lastblock_filename(impl=self.impl) + ".tmp"
        
        if not os.path.exists(tmp_lastblock_filename) and (
            os.path.exists(tmp_db_filename) or os.path.exists(tmp_snapshot_filename)):
            # we did not successfully stage the write.
            # rollback 
            log.error("Partial write detected.  Not committing.")
            return False
           
        # basic sanity checks: don't overwrite the db if the file is zero bytes, or if we can't load it
        if os.path.exists( tmp_db_filename ):
            db_dir = os.path.dirname( tmp_db_filename )

            try:
                dirfd = os.open(db_dir, os.O_DIRECTORY)
                os.fsync(dirfd)
                os.close( dirfd )
            except Exception as e:
                log.exception(e)
                log.error("FATAL: failed to sync directory %s" % db_dir)
                traceback.print_stack()
                os.abort()

            sb = os.stat( tmp_db_filename )
            if sb.st_size == 0:
                log.error("Partial write detected: tried to overwrite with zero-sized db!  Will rollback.")
                return False

            if startup:
                # make sure we can load this 
                try:
                    with open(tmp_snapshot_filename, "r") as f:
                        db_txt = f.read()

                    db_json = json.loads(db_txt)
                except:
                    log.error("Partial write detected: corrupt partially-committed db!  Will rollback.")
                    return False
Example #8
    def backup_restore( cls, block_id, impl, working_dir=None ):
        """
        Restore from a backup, given the virtualchain implementation module and block number
        """
        backup_dir = os.path.join( config.get_working_dir(impl=impl, working_dir=working_dir), "backups" )
        backup_paths = cls.get_backup_paths( block_id, impl, working_dir=working_dir )
        for p in backup_paths:
            assert os.path.exists( p ), "No such backup file: %s" % p

        for p in [config.get_db_filename(impl=impl, working_dir=working_dir),
                  config.get_snapshots_filename(impl=impl, working_dir=working_dir),
                  config.get_lastblock_filename(impl=impl, working_dir=working_dir)]:
            pbase = os.path.basename(p)
            backup_path = os.path.join( backup_dir, pbase + (".bak.%s" % block_id))
            log.debug("Restoring '%s' to '%s'" % (backup_path, p))
            shutil.copy( backup_path, p )
    
        return True
Example #9
    def backup_restore( cls, block_id, impl ):
        """
        Restore from a backup, given the virtualchain implementation module and block number
        """
        backup_dir = os.path.join( config.get_working_dir(impl=impl), "backups" )
        backup_paths = cls.get_backup_paths( block_id, impl )
        for p in backup_paths:
            assert os.path.exists( p ), "No such backup file: %s" % p

        for p in [config.get_db_filename(impl=impl),
                  config.get_snapshots_filename(impl=impl),
                  config.get_lastblock_filename(impl=impl)]:
            pbase = os.path.basename(p)
            backup_path = os.path.join( backup_dir, pbase + (".bak.%s" % block_id))
            log.debug("Restoring '%s' to '%s'" % (backup_path, p))
            shutil.copy( backup_path, p )
    
        return True
    def save(self, block_id, consensus_hash, pending_ops, backup=False):
        """
        Write out all state to the working directory.
        Calls the implementation's 'db_save' method to store any state for this block.
        Calls the implementation's 'db_continue' method at the very end, to signal
        to the implementation that all virtualchain state has been saved.  This method
        can return False, in which case indexing stops.

        Return True on success
        Return False if the implementation wants to exit.
        Aborts on fatal error
        """

        if self.read_only:
            log.error("FATAL: read only")
            traceback.print_stack()
            os.abort()

        if block_id < self.lastblock:
            log.error("FATAL: Already processed up to block %s (got %s)" % (self.lastblock, block_id))
            traceback.print_stack()
            os.abort()

        # stage data to temporary files
        tmp_db_filename = (config.get_db_filename(impl=self.impl) + ".tmp")
        tmp_snapshot_filename = (config.get_snapshots_filename(impl=self.impl) + ".tmp")
        tmp_lastblock_filename = (config.get_lastblock_filename(impl=self.impl) + ".tmp")

        try:
            with open(tmp_snapshot_filename, 'w') as f:
                db_dict = {
                    'snapshots': self.consensus_hashes
                }
                f.write(json.dumps(db_dict))
                f.flush()

            with open(tmp_lastblock_filename, "w") as lastblock_f:
                lastblock_f.write("%s" % block_id)
                lastblock_f.flush()

        except Exception as e:
            # failure to save is fatal
            log.exception(e)
            log.error("FATAL: Could not stage data for block %s" % block_id)
            traceback.print_stack()
            os.abort()
Example #11
 def rollback( self ):
     """
     Roll back a pending write: blow away temporary files.
     """
     
     tmp_db_filename = config.get_db_filename() + ".tmp"
     tmp_snapshot_filename = config.get_snapshots_filename() + ".tmp"
     tmp_lastblock_filename = config.get_lastblock_filename() + ".tmp"
     
     for f in [tmp_db_filename, tmp_snapshot_filename, tmp_lastblock_filename]:
         if os.path.exists( f ):
             
             try:
                 os.unlink( f )
             except:
                 log.error("Failed to unlink '%s'" % f )
Example #12
 def rollback( self ):
     """
     Roll back a pending write: blow away temporary files.
     """
     
     tmp_db_filename = config.get_db_filename() + ".tmp"
     tmp_snapshot_filename = config.get_snapshots_filename() + ".tmp"
     tmp_lastblock_filename = config.get_lastblock_filename() + ".tmp"
     
     for f in [tmp_db_filename, tmp_snapshot_filename, tmp_lastblock_filename]:
         if os.path.exists( f ):
             
             try:
                 os.unlink( f )
             except:
                 log.error("Failed to unlink '%s'" % f )
Example #13
    def save( self, block_id, consensus_hash, pending_ops, backup=False ):
        """
        Write out all state to the working directory.
        Calls the implementation's 'db_save' method to store any state for this block.
        Calls the implementation's 'db_continue' method at the very end, to signal
        to the implementation that all virtualchain state has been saved.  This method
        can return False, in which case indexing stops.
        
        Return True on success 
        Return False if the implementation wants to exit.
        Aborts on fatal error
        """
        
        if self.read_only:
            log.error("FATAL: read only")
            traceback.print_stack()
            os.abort()

        if block_id < self.lastblock:
            log.error("FATAL: Already processed up to block %s (got %s)" % (self.lastblock, block_id))
            traceback.print_stack()
            os.abort()

        # stage data to temporary files
        tmp_db_filename = (config.get_db_filename(impl=self.impl) + ".tmp")
        tmp_snapshot_filename = (config.get_snapshots_filename(impl=self.impl) + ".tmp")
        tmp_lastblock_filename = (config.get_lastblock_filename(impl=self.impl) + ".tmp")
        
        try:
            with open(tmp_snapshot_filename, 'w') as f:
                db_dict = {
                   'snapshots': self.consensus_hashes
                }
                f.write(json.dumps(db_dict))
                f.flush()
            
            with open(tmp_lastblock_filename, "w") as lastblock_f:
                lastblock_f.write("%s" % block_id)
                lastblock_f.flush()

        except Exception as e:
            # failure to save is fatal 
            log.exception(e)
            log.error("FATAL: Could not stage data for block %s" % block_id)
            traceback.print_stack()
            os.abort()
Example #14
def generate_db_data_from_log_file(log_id, db_connection=None):
    """
    Extract necessary information from the log file and insert as an entry to
    the LogsGenerated table (faster information retrieval later on).
    This is an expensive operation.
    It is OK to call this a second time for the same log; the call will just
    silently fail (but it will still read the whole log and will not update the DB entry).

    :return: DBDataGenerated object
    """

    db_data_gen = DBDataGenerated.from_log_file(log_id)

    need_closing = False
    if db_connection is None:
        db_connection = sqlite3.connect(get_db_filename())
        need_closing = True

    db_cursor = db_connection.cursor()
    try:
        db_cursor.execute(
            'insert into LogsGenerated (Id, Duration, '
            'Mavtype, Estimator, AutostartId, Hardware, '
            'Software, NumLoggedErrors, NumLoggedWarnings, '
            'FlightModes, SoftwareVersion, UUID, FlightModeDurations, StartTime) values '
            '(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)', [
                log_id, db_data_gen.duration_s, db_data_gen.mav_type,
                db_data_gen.estimator, db_data_gen.sys_autostart_id,
                db_data_gen.sys_hw, db_data_gen.ver_sw,
                db_data_gen.num_logged_errors, db_data_gen.num_logged_warnings,
                ','.join(map(str, db_data_gen.flight_modes)),
                db_data_gen.ver_sw_release, db_data_gen.vehicle_uuid,
                db_data_gen.flight_mode_durations_str(),
                db_data_gen.start_time_utc
            ])
        db_connection.commit()
    except sqlite3.IntegrityError:
        # someone else already inserted it (race). just ignore it
        pass

    db_cursor.close()
    if need_closing:
        db_connection.close()

    return db_data_gen
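The sqlite3.IntegrityError handler is what makes this insert race-tolerant: assuming Id is the table's primary key (which the silent-failure behaviour described in the docstring implies), a duplicate insert is rejected by the constraint instead of by a racy check-then-insert. A self-contained sketch of the pattern against a throwaway table:

import sqlite3

con = sqlite3.connect(':memory:')
con.execute('create table LogsGenerated (Id text primary key)')

def insert_once(con, log_id):
    # rely on the primary-key constraint instead of checking first
    try:
        con.execute('insert into LogsGenerated (Id) values (?)', (log_id,))
        con.commit()
        return True
    except sqlite3.IntegrityError:
        return False  # another writer got there first

print(insert_once(con, 'abc'))  # True
print(insert_once(con, 'abc'))  # False: already inserted
con.close()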
Example #15
    def delete_log_entry(log_id, token):
        """
        delete a log entry (DB & file), validate token first

        :return: True on success
        """
        con = sqlite3.connect(get_db_filename(),
                              detect_types=sqlite3.PARSE_DECLTYPES)
        cur = con.cursor()
        cur.execute('select Token from Logs where Id = ?', (log_id, ))
        db_tuple = cur.fetchone()
        if db_tuple is None:
            return False
        if token != db_tuple[0]:  # validate token
            return False

        # kml file
        kml_path = get_kml_filepath()
        kml_file_name = os.path.join(kml_path,
                                     log_id.replace('/', '.') + '.kml')
        if os.path.exists(kml_file_name):
            os.unlink(kml_file_name)

        # preview image
        preview_image_filename = os.path.join(get_overview_img_filepath(),
                                              log_id + '.png')
        if os.path.exists(preview_image_filename):
            os.unlink(preview_image_filename)

        log_file_name = get_log_filename(log_id)
        print('deleting log entry {} and file {}'.format(
            log_id, log_file_name))
        os.unlink(log_file_name)
        cur.execute("DELETE FROM LogsGenerated WHERE Id = ?", (log_id, ))
        cur.execute("DELETE FROM Logs WHERE Id = ?", (log_id, ))
        con.commit()
        cur.close()
        con.close()

        # need to clear the cache as well
        clear_ulog_cache()

        return True
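One hardening note: the plain != token comparison above can, in principle, leak timing information. The standard library's hmac.compare_digest does a constant-time comparison and is a drop-in replacement here, given that both values are ASCII hex strings (a sketch, reusing the names from the function above):

import hmac

# constant-time alternative to: if token != db_tuple[0]
if not hmac.compare_digest(token, db_tuple[0]):
    return False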
Example #16
 def get_original_filename(default_value, new_file_suffix):
     """
     get the uploaded file name & exchange the file extension
     """
     try:
         con = sqlite3.connect(get_db_filename(), detect_types=sqlite3.PARSE_DECLTYPES)
         cur = con.cursor()
         cur.execute('select OriginalFilename '
                     'from Logs where Id = ?', [log_id])
         db_tuple = cur.fetchone()
         # close the DB connection before returning, so a found row does not leak it
         cur.close()
         con.close()
         if db_tuple is not None:
             original_file_name = escape(db_tuple[0])
             if original_file_name[-4:].lower() == '.ulg':
                 original_file_name = original_file_name[:-4]
             return original_file_name + new_file_suffix
     except:
         print("DB access failed:", sys.exc_info()[0], sys.exc_info()[1])
     return default_value
    def get(self, *args, **kwargs):

        jsonlist = list()

        # get the logs (but only the public ones)
        con = sqlite3.connect(get_db_filename(), detect_types=sqlite3.PARSE_DECLTYPES)
        cur = con.cursor()
        cur.execute('select Id, Date, Description, WindSpeed, Rating, VideoUrl, ErrorLabels '
                    'from Logs where Public = 1')
        # need to fetch all here, because we will do more SQL calls while
        # iterating (having multiple cursors does not seem to work)
        db_tuples = cur.fetchall()
        for db_tuple in db_tuples:
            jsondict = dict()
            db_data = DBData()
            log_id = db_tuple[0]
            jsondict['log_id'] = log_id
            jsondict['log_date'] = db_tuple[1].strftime('%Y-%m-%d')
            db_data.description = db_tuple[2]
            db_data.feedback = ''
            db_data.type = ''
            db_data.wind_speed = db_tuple[3]
            db_data.rating = db_tuple[4]
            db_data.video_url = db_tuple[5]
            db_data.error_labels = sorted([int(x) for x in db_tuple[6].split(',') if len(x) > 0]) \
                if db_tuple[6] else []
            jsondict.update(db_data.to_json_dict())

            db_data_gen = get_generated_db_data_from_log(log_id, con, cur)
            if db_data_gen is None:
                continue

            jsondict.update(db_data_gen.to_json_dict())
            jsonlist.append(jsondict)

        cur.close()
        con.close()

        self.set_header('Content-Type', 'application/json')
        self.write(json.dumps(jsonlist))
Example #18
    def make_backups( self, block_id ):
        """
        If we're doing backups on a regular basis, then 
        carry them out here if it is time to do so.
        This method does nothing otherwise.
        Abort on failure
        """

        # make a backup?
        if self.backup_frequency is not None:
            if (block_id % self.backup_frequency) == 0:

                backup_dir = os.path.join( config.get_working_dir(impl=self.impl), "backups" )
                if not os.path.exists(backup_dir):
                    try:
                        os.makedirs(backup_dir)
                    except Exception as e:
                        log.exception(e)
                        log.error("FATAL: failed to make backup directory '%s'" % backup_dir)
                        traceback.print_stack()
                        os.abort()
                        

                for p in [config.get_db_filename(impl=self.impl),
                          config.get_snapshots_filename(impl=self.impl),
                          config.get_lastblock_filename(impl=self.impl)]:
                    if os.path.exists(p):
                        try:
                            pbase = os.path.basename(p)
                            backup_path = os.path.join( backup_dir, pbase + (".bak.%s" % (block_id - 1)))

                            if not os.path.exists( backup_path ):
                                shutil.copy( p, backup_path )
                            else:
                                log.error("Will not overwrite '%s'" % backup_path)

                        except Exception as e:
                            log.exception(e)
                            log.error("FATAL: failed to back up '%s'" % p)
                            traceback.print_stack()
                            os.abort()
Example #19
    def make_backups( self, block_id, working_dir=None ):
        """
        If we're doing backups on a regular basis, then 
        carry them out here if it is time to do so.
        This method does nothing otherwise.
        Abort on failure
        """

        # make a backup?
        if self.backup_frequency is not None:
            if (block_id % self.backup_frequency) == 0:

                backup_dir = os.path.join( config.get_working_dir(impl=self.impl, working_dir=working_dir), "backups" )
                if not os.path.exists(backup_dir):
                    try:
                        os.makedirs(backup_dir)
                    except Exception as e:
                        log.exception(e)
                        log.error("FATAL: failed to make backup directory '%s'" % backup_dir)
                        traceback.print_stack()
                        os.abort()
                        

                for p in [config.get_db_filename(impl=self.impl, working_dir=working_dir),
                          config.get_snapshots_filename(impl=self.impl, working_dir=working_dir),
                          config.get_lastblock_filename(impl=self.impl, working_dir=working_dir)]:
                    if os.path.exists(p):
                        try:
                            pbase = os.path.basename(p)
                            backup_path = os.path.join( backup_dir, pbase + (".bak.%s" % (block_id - 1)))

                            if not os.path.exists( backup_path ):
                                shutil.copy( p, backup_path )
                            else:
                                log.error("Will not overwrite '%s'" % backup_path)

                        except Exception as e:
                            log.exception(e)
                            log.error("FATAL: failed to back up '%s'" % p)
                            traceback.print_stack()
                            os.abort()
    def post(self, *args, **kwargs):
        """ POST request callback """
        if self.multipart_streamer:
            try:
                self.multipart_streamer.data_complete()
                form_data = self.multipart_streamer.get_values([
                    'description', 'email', 'allowForAnalysis', 'obfuscated',
                    'source', 'type', 'feedback', 'windSpeed', 'rating',
                    'videoUrl', 'public', 'vehicleName'
                ])

                description = escape(form_data['description'].decode("utf-8"))
                email = form_data.get('email',
                                      bytes("(no email provided)",
                                            'utf-8')).decode("utf-8")
                upload_type = form_data.get('type',
                                            bytes("personal",
                                                  'utf-8')).decode("utf-8")
                source = form_data.get('source',
                                       bytes("webui", 'utf-8')).decode("utf-8")
                title = ''  # may be used in future...
                obfuscated = {
                    'true': 1,
                    'false': 0
                }.get(
                    form_data.get('obfuscated', b'false').decode('utf-8'), 0)
                allow_for_analysis = {
                    'true': 1,
                    'false': 0
                }.get(
                    form_data.get('allowForAnalysis',
                                  b'false').decode('utf-8'), 0)
                feedback = escape(
                    form_data.get('feedback', b'').decode("utf-8"))

                wind_speed = -1
                rating = ''
                video_url = ''
                is_public = 1
                vehicle_name = escape(
                    form_data.get('vehicleName',
                                  bytes("", 'utf-8')).decode("utf-8"))
                error_labels = ''

                # TODO: make the format of formdict a little more compatible with form_data above
                formdict = {}
                formdict['description'] = description
                formdict['email'] = email
                formdict['upload_type'] = upload_type
                formdict['source'] = source
                formdict['title'] = title
                formdict['obfuscated'] = obfuscated
                formdict['allow_for_analysis'] = allow_for_analysis
                formdict['feedback'] = feedback
                formdict['wind_speed'] = wind_speed
                formdict['rating'] = rating
                formdict['video_url'] = video_url
                formdict['is_public'] = is_public
                formdict['vehicle_name'] = vehicle_name
                formdict['error_labels'] = error_labels

                # we don't bother parsing any of the "flight report" metadata, it's not very useful to us
                # stored_email = ''
                # if upload_type == 'flightreport':
                #     try:
                #         wind_speed = int(escape(form_data['windSpeed'].decode("utf-8")))
                #     except ValueError:
                #         wind_speed = -1
                #     rating = escape(form_data['rating'].decode("utf-8"))
                #     if rating == 'notset': rating = ''
                #     stored_email = email
                #     # get video url & check if valid
                #     video_url = escape(form_data['videoUrl'].decode("utf-8"), quote=True)
                #     if not validate_url(video_url):
                #         video_url = ''
                #     if 'vehicleName' in form_data:
                #         vehicle_name = escape(form_data['vehicleName'].decode("utf-8"))

                #     # always allow for statistical analysis
                #     allow_for_analysis = 1
                #     if 'public' in form_data:
                #         if form_data['public'].decode("utf-8") == 'true':
                #             is_public = 1

                # open the database connection
                con = sqlite3.connect(get_db_filename())
                cur = con.cursor()

                file_obj = self.multipart_streamer.get_parts_by_name(
                    'filearg')[0]
                upload_file_name = file_obj.get_filename()

                # read file header and ensure validity
                peek_ulog_header = file_obj.get_payload_partial(
                    len(ULog.HEADER_BYTES))
                peek_zip_header = file_obj.get_payload_partial(4)
                zip_headers = [
                    b'\x50\x4b\x03\x04', b'\x50\x4b\x05\x06',
                    b'\x50\x4b\x07\x08'
                ]
                # we check that it is either a well-formed zip or a ULog
                # is file a ULog? then continue as we were :)
                if (peek_ulog_header == ULog.HEADER_BYTES):
                    log_id = save_uploaded_log(con, cur, file_obj, formdict)

                    # generate URL info and redirect
                    url = '/plot_app?log=' + log_id
                    full_plot_url = get_http_protocol(
                    ) + '://' + get_domain_name() + url
                    print(full_plot_url)
                    # do not redirect for QGC
                    if source != 'QGroundControl':
                        self.redirect(url)

                # is the file a zip? read the magic numbers and unzip it
                elif (peek_zip_header in zip_headers):
                    with zipfile.ZipFile(file_obj.f_out) as zip_file:
                        for log_filename in zip_file.namelist():
                            # make sure we're dealing with a ulog file
                            # TODO: do actual validation here, don't just check filename
                            _, ext = os.path.splitext(log_filename)
                            if ext not in ['.ulg', '.ulog']:
                                print(
                                    f'Skipping extracting non-ULog file {file_obj.f_out.name}//{log_filename}'
                                )
                                continue
                            # TODO: switch to save_uploaded_log
                            # generate a log ID and persistence filename
                            while True:
                                log_id = str(uuid.uuid4())
                                new_file_name = get_log_filename(log_id)
                                if not os.path.exists(new_file_name):
                                    break
                            # extract and rename the ulog file to something we control
                            print(
                                f'Extracting uploaded log {file_obj.f_out.name}//{log_filename} file to',
                                new_file_name)
                            zip_file.extract(log_filename,
                                             path=os.path.dirname(new_file_name))
                            os.rename(
                                os.path.join(os.path.dirname(new_file_name),
                                             log_filename), new_file_name)
                            # Load the ulog file but only if not uploaded via CI.
                            ulog = None
                            if source != 'CI':
                                ulog_file_name = get_log_filename(log_id)
                                ulog = load_ulog_file(ulog_file_name)
                            # generate a token: secure random string (url-safe)
                            token = str(binascii.hexlify(os.urandom(16)),
                                        'ascii')
                            # put additional data into a DB
                            cur.execute(
                                'insert into Logs (Id, Title, Description, '
                                'OriginalFilename, Date, AllowForAnalysis, Obfuscated, '
                                'Source, Email, WindSpeed, Rating, Feedback, Type, '
                                'videoUrl, ErrorLabels, Public, Token) values '
                                '(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',
                                [
                                    log_id, title, description,
                                    upload_file_name,
                                    datetime.datetime.now(),
                                    allow_for_analysis, obfuscated, source,
                                    email, wind_speed, rating, feedback,
                                    upload_type, video_url, error_labels,
                                    is_public, token
                                ])
                            if ulog is not None:
                                vehicle_data = update_vehicle_db_entry(
                                    cur, ulog, log_id, vehicle_name)
                                vehicle_name = vehicle_data.name
                            con.commit()
                            generate_db_data_from_log_file(log_id, con)
                            con.commit()

                            # generate URL info and redirect
                            url = '/plot_app?log=' + log_id
                            full_plot_url = get_http_protocol(
                            ) + '://' + get_domain_name() + url
                            print(full_plot_url)
                        self.redirect('/browse')
                # is file neither a zip nor a ULog? error out :)
                else:
                    if upload_file_name[-7:].lower() == '.px4log':
                        raise CustomHTTPError(
                            400,
                            'Invalid File. This seems to be a px4log file. '
                            'Upload it to <a href="http://logs.uaventure.com" '
                            'target="_blank">logs.uaventure.com</a>.')
                    raise CustomHTTPError(400, 'Invalid File')

                # this massive chunk of comment was the code used to send emails for
                # uploaded flight reports. we no longer use this functionality.
                # however, for some weird reason, this chunk of code also generated a
                # LogsGenerated entry for faster log loading for public logs. so
                # we move the line up and out of the code it's not supposed to be a part
                # of, and put it right here :)
                #generate_db_data_from_log_file(log_id, con)

                # delete_url = get_http_protocol()+'://'+get_domain_name()+ \
                #     '/edit_entry?action=delete&log='+log_id+'&token='+token

                # information for the notification email
                # info = {}
                # info['description'] = description
                # info['feedback'] = feedback
                # info['upload_filename'] = upload_file_name
                # info['type'] = ''
                # info['airframe'] = ''
                # info['hardware'] = ''
                # info['uuid'] = ''
                # info['software'] = ''
                # info['rating'] = rating
                # if len(vehicle_name) > 0:
                #     info['vehicle_name'] = vehicle_name

                # if ulog is not None:
                #     px4_ulog = PX4ULog(ulog)
                #     info['type'] = px4_ulog.get_mav_type()
                #     airframe_name_tuple = get_airframe_name(ulog)
                #     if airframe_name_tuple is not None:
                #         airframe_name, airframe_id = airframe_name_tuple
                #         if len(airframe_name) == 0:
                #             info['airframe'] = airframe_id
                #         else:
                #             info['airframe'] = airframe_name
                #     sys_hardware = ''
                #     if 'ver_hw' in ulog.msg_info_dict:
                #         sys_hardware = escape(ulog.msg_info_dict['ver_hw'])
                #         info['hardware'] = sys_hardware
                #     if 'sys_uuid' in ulog.msg_info_dict and sys_hardware != 'SITL':
                #         info['uuid'] = escape(ulog.msg_info_dict['sys_uuid'])
                #     branch_info = ''
                #     if 'ver_sw_branch' in ulog.msg_info_dict:
                #         branch_info = ' (branch: '+ulog.msg_info_dict['ver_sw_branch']+')'
                #     if 'ver_sw' in ulog.msg_info_dict:
                #         ver_sw = escape(ulog.msg_info_dict['ver_sw'])
                #         info['software'] = ver_sw + branch_info

                # if upload_type == 'flightreport' and is_public and source != 'CI':
                #     destinations = set(email_notifications_config['public_flightreport'])
                #     if rating in ['unsatisfactory', 'crash_sw_hw', 'crash_pilot']:
                #         destinations = destinations | \
                #             set(email_notifications_config['public_flightreport_bad'])
                #     send_flightreport_email(
                #         list(destinations),
                #         full_plot_url,
                #         DBData.rating_str_static(rating),
                #         DBData.wind_speed_str_static(wind_speed), delete_url,
                #         stored_email, info)

                #     # also generate the additional DB entry
                #     # (we may have the log already loaded in 'ulog', however the
                #     # lru cache will make it very quick to load it again)
                #     generate_db_data_from_log_file(log_id, con)
                #     # also generate the preview image
                #     IOLoop.instance().add_callback(generate_overview_img_from_id, log_id)

                # send notification emails
                # send_notification_email(email, full_plot_url, delete_url, info)

            except CustomHTTPError:
                raise

            except ULogException as e:
                raise CustomHTTPError(
                    400, 'Failed to parse the file. It is most likely corrupt.'
                ) from e
            except Exception as e:
                print('Fatal error when handling POST data',
                      sys.exc_info()[0],
                      sys.exc_info()[1])
                traceback.print_exc()
                raise CustomHTTPError(500) from e

            finally:
                # close our DB connections
                cur.close()
                con.close()
                # free the uploaded files
                self.multipart_streamer.release_parts()
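The upload path above dispatches on file magic: the leading bytes are compared against the ULog header and the three ZIP signatures (local file header, empty archive, spanned archive). A compact standalone version of that sniffing logic, using the 7-byte magic that pyulog exposes as ULog.HEADER_BYTES:

ULOG_MAGIC = b'\x55\x4c\x6f\x67\x01\x12\x35'   # 'ULog' followed by version bytes
ZIP_MAGICS = (b'\x50\x4b\x03\x04', b'\x50\x4b\x05\x06', b'\x50\x4b\x07\x08')

def sniff_upload(first_bytes):
    """Classify an upload from its leading bytes: 'ulog', 'zip' or 'unknown'."""
    if first_bytes.startswith(ULOG_MAGIC):
        return 'ulog'
    if first_bytes[:4] in ZIP_MAGICS:
        return 'zip'
    return 'unknown'

print(sniff_upload(b'\x50\x4b\x03\x04rest'))  # prints: zip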
Example #21
    def __init__(self, plot_config, verbose_output=False):

        self._config = plot_config

        self._verbose_output = verbose_output

        # lists of dates when a _log was uploaded, one list per type
        self._public_logs_dates = []
        self._private_logs_dates = []
        self._ci_logs_dates = []
        self._all_logs_dates = []

        self._public_logs = []

        # read from the DB
        con = sqlite3.connect(get_db_filename(),
                              detect_types=sqlite3.PARSE_DECLTYPES)
        with con:
            cur = con.cursor()

            cur.execute('select Id, Date, Source, Public, Rating from Logs')

            db_tuples = cur.fetchall()
            for db_tuple in db_tuples:
                log = _Log(db_tuple)

                self._all_logs_dates.append(log.date)
                if log.is_public == 1:
                    if log.source == 'CI':
                        self._ci_logs_dates.append(log.date)
                    else:
                        self._public_logs_dates.append(log.date)
                else:
                    if log.source == 'CI':
                        self._ci_logs_dates.append(log.date)
                    else:
                        self._private_logs_dates.append(log.date)

                # LogsGenerated: public only
                if log.is_public != 1 or log.source == 'CI':
                    continue

                cur.execute('select * from LogsGenerated where Id = ?',
                            [log.log_id])
                db_tuple = cur.fetchone()

                if db_tuple is None:
                    print("Error: no generated data")
                    continue

                log.set_generated(db_tuple)

                # filter bogus entries
                if log.sw_version == 'v0.0.0':
                    if self._verbose_output:
                        print('Warning: %s with version=v0.0.0' % log.log_id)
                    continue
                if log.duration > 7 * 24 * 3600:  # probably bogus timestamp(s)
                    if self._verbose_output:
                        print('Warning: %s with very high duration %i' %
                              (log.log_id, log.duration))
                    continue

                if log.sw_version == '':
                    # FIXME: does that still occur and if so why?
                    if self._verbose_output:
                        print('Warning: %s version not set' % log.log_id)
                    continue

                if log.autostart_id == 0:
                    print('Warning: %s with autostart_id=0' % log.log_id)
                    continue

                try:
                    ver_major = int(log.sw_version[1:].split('.')[0])
                    if ver_major >= 2 or ver_major == 0:
                        print('Warning: %s with large/small version %s' %
                              (log.log_id, log.sw_version))
                        continue
                except:
                    continue

                self._public_logs.append(log)

        self._version_data = {}  # dict of _VersionData items
        self._all_airframes = set()
        self._all_boards = set()
        self._all_ratings = set()
        self._all_flight_modes = set()
        self._total_duration = 0  # in hours, public logs only
        self._total_last_version_duration = 0  # in hours, public logs only
        self._latest_major_release = ""

        for log in self._public_logs:
            if log.sw_version not in self._version_data:
                self._version_data[log.sw_version] = _VersionData()

            self._all_airframes.add(str(log.autostart_id))
            self._all_boards.add(log.hardware)
            self._all_ratings.add(log.rating)

            cur_version_data = self._version_data[log.sw_version]
            boards = cur_version_data.boards
            boards_num_logs = cur_version_data.boards_num_logs
            airframes = cur_version_data.airframes
            airframes_num_logs = cur_version_data.airframes_num_logs
            ratings = cur_version_data.ratings
            flight_modes = cur_version_data.flight_mode_durations

            if log.hardware not in boards:
                boards[log.hardware] = 0
                boards_num_logs[log.hardware] = 0
            boards[log.hardware] += log.duration / 3600.
            boards_num_logs[log.hardware] += 1

            for flight_mode, duration in log.flight_mode_durations:
                flight_mode_str = str(flight_mode)
                self._all_flight_modes.add(flight_mode_str)
                if flight_mode_str not in flight_modes:
                    flight_modes[flight_mode_str] = 0.
                flight_modes[flight_mode_str] += duration / 3600.

            autostart_str = str(log.autostart_id)
            if autostart_str not in airframes:
                airframes[autostart_str] = 0
                airframes_num_logs[autostart_str] = 0
            airframes[autostart_str] += log.duration / 3600.
            airframes_num_logs[autostart_str] += 1

            if log.rating not in ratings:
                ratings[log.rating] = 0
            ratings[log.rating] += 1

            self._total_duration += log.duration / 3600.

        if len(self._version_data) > 0:
            latest_version = sorted(self._version_data,
                                    key=functools.cmp_to_key(
                                        _Log.compare_version))[-1]
            latest_major_version = latest_version.split('.')[0:2]
            self._latest_major_release = '.'.join(latest_major_version)
            for log in self._public_logs:
                if log.sw_version.split('.')[0:2] == latest_major_version:
                    self._total_last_version_duration += log.duration / 3600.
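The latest-version lookup above sorts version strings with functools.cmp_to_key(_Log.compare_version). When the strings are plain 'vMAJOR.MINOR.PATCH', an equivalent and simpler alternative is a key function (a sketch, assuming that fixed format):

def version_key(version):
    # 'v1.10.2' -> (1, 10, 2); assumes a leading 'v' and purely numeric parts
    return tuple(int(part) for part in version.lstrip('v').split('.'))

print(sorted(['v1.9.0', 'v1.10.0', 'v1.8.2'], key=version_key)[-1])  # v1.10.0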
Example #22
    def save(self, block_id, consensus_hash, pending_ops, backup=False):
        """
        Write out all state to the working directory.
        Calls the implementation's 'db_save' method.
        
        Return True on success 
        Return False on error
        Raise an exception if block_id represents a block we've already processed.
        """

        if block_id < self.lastblock:
            raise Exception("Already processed up to block %s (got %s)" %
                            (self.lastblock, block_id))

        # stage data to temporary files
        tmp_db_filename = (config.get_db_filename() + ".tmp")
        tmp_snapshot_filename = (config.get_snapshots_filename() + ".tmp")
        tmp_lastblock_filename = (config.get_lastblock_filename() + ".tmp")

        with open(tmp_snapshot_filename, 'w') as f:
            db_dict = {'snapshots': self.consensus_hashes}
            f.write(json.dumps(db_dict))
            f.flush()

        # put this last...
        with open(tmp_lastblock_filename, "w") as lastblock_f:
            lastblock_f.write("%s" % block_id)
            lastblock_f.flush()

        rc = self.impl.db_save(block_id,
                               consensus_hash,
                               pending_ops,
                               tmp_db_filename,
                               db_state=self.state)
        if not rc:
            # failed to save
            log.error("Implementation failed to save at block %s to %s" %
                      (block_id, tmp_db_filename))

            try:
                os.unlink(tmp_lastblock_filename)
            except:
                pass

            try:
                os.unlink(tmp_snapshot_filename)
            except:
                pass

            return False

        rc = self.commit(backup=backup)
        if not rc:
            log.error("Failed to commit data at block %s.  Rolling back." %
                      block_id)

            self.rollback()
            return False

        else:
            self.lastblock = block_id
            return True
Example #23
    def commit(self, backup=False, startup=False):
        """
        Move all written but uncommitted data into place.
        Return True on success 
        Return False on error (in which case the caller should rollback())
        
        It is safe to call this method repeatedly until it returns True.
        """

        tmp_db_filename = config.get_db_filename() + ".tmp"
        tmp_snapshot_filename = config.get_snapshots_filename() + ".tmp"
        tmp_lastblock_filename = config.get_lastblock_filename() + ".tmp"

        if not os.path.exists(tmp_lastblock_filename) and (
                os.path.exists(tmp_db_filename)
                or os.path.exists(tmp_snapshot_filename)):
            # we did not successfully stage the write.
            # rollback
            log.error("Partial write detected.  Not committing.")
            return False

        # basic sanity checks: don't overwrite the db if the file is zero bytes, or if we can't load it
        if os.path.exists(tmp_db_filename):
            sb = os.stat(tmp_db_filename)
            if sb.st_size == 0:
                log.error(
                    "Partial write detected: tried to overwrite with zero-sized db!  Will rollback."
                )
                return False

            if startup:
                # make sure we can load this
                try:
                    with open(tmp_snapshot_filename, "r") as f:
                        db_txt = f.read()

                    db_json = json.loads(db_txt)
                except:
                    log.error(
                        "Partial write detected: corrupt partially-committed db!  Will rollback."
                    )
                    return False

        backup_time = int(time.time() * 1000000)

        for tmp_filename, filename in zip( [tmp_lastblock_filename, tmp_snapshot_filename, tmp_db_filename], \
                                           [config.get_lastblock_filename(), config.get_snapshots_filename(), config.get_db_filename()] ):

            if not os.path.exists(tmp_filename):
                continue

            # commit our new lastblock, consensus hash set, and state engine data
            try:

                # NOTE: rename fails on Windows if the destination exists
                if sys.platform == 'win32' and os.path.exists(filename):

                    try:
                        os.unlink(filename)
                    except:
                        pass

                if not backup:
                    os.rename(tmp_filename, filename)
                else:
                    shutil.copy(tmp_filename, filename)
                    os.rename(tmp_filename,
                              tmp_filename + (".%s" % backup_time))

            except Exception as e:

                log.exception(e)
                return False
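On Python 3, the win32 special case above is unnecessary: os.replace() (available since 3.3) renames atomically and overwrites an existing destination on both POSIX and Windows. A minimal sketch of the simplified commit step:

import os

def commit_file(tmp_filename, filename):
    # os.replace overwrites the destination atomically on every platform,
    # collapsing the unlink-then-rename dance needed with os.rename on Windows
    os.replace(tmp_filename, filename)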
Example #24
    def post(self, *args, **kwargs):
        """ POST request callback """
        if self.multipart_streamer:
            try:
                self.multipart_streamer.data_complete()
                form_data = self.multipart_streamer.get_values(
                    ['description', 'email',
                     'allowForAnalysis', 'obfuscated', 'source', 'type',
                     'feedback', 'windSpeed', 'rating', 'videoUrl', 'public',
                     'vehicleName'])
                description = escape(form_data['description'].decode("utf-8"))
                email = form_data['email'].decode("utf-8")
                upload_type = 'personal'
                if 'type' in form_data:
                    upload_type = form_data['type'].decode("utf-8")
                source = 'webui'
                title = '' # may be used in future...
                if 'source' in form_data:
                    source = form_data['source'].decode("utf-8")
                obfuscated = 0
                if 'obfuscated' in form_data:
                    if form_data['obfuscated'].decode("utf-8") == 'true':
                        obfuscated = 1
                allow_for_analysis = 0
                if 'allowForAnalysis' in form_data:
                    if form_data['allowForAnalysis'].decode("utf-8") == 'true':
                        allow_for_analysis = 1
                feedback = ''
                if 'feedback' in form_data:
                    feedback = escape(form_data['feedback'].decode("utf-8"))
                wind_speed = -1
                rating = ''
                stored_email = ''
                video_url = ''
                is_public = 0
                vehicle_name = ''
                error_labels = ''

                if upload_type == 'flightreport':
                    try:
                        wind_speed = int(escape(form_data['windSpeed'].decode("utf-8")))
                    except ValueError:
                        wind_speed = -1
                    rating = escape(form_data['rating'].decode("utf-8"))
                    if rating == 'notset': rating = ''
                    stored_email = email
                    # get video url & check if valid
                    video_url = escape(form_data['videoUrl'].decode("utf-8"), quote=True)
                    if not validate_url(video_url):
                        video_url = ''
                    if 'vehicleName' in form_data:
                        vehicle_name = escape(form_data['vehicleName'].decode("utf-8"))

                    # always allow for statistical analysis
                    allow_for_analysis = 1
                    if 'public' in form_data:
                        if form_data['public'].decode("utf-8") == 'true':
                            is_public = 1

                file_obj = self.multipart_streamer.get_parts_by_name('filearg')[0]
                upload_file_name = file_obj.get_filename()

                while True:
                    log_id = str(uuid.uuid4())
                    new_file_name = get_log_filename(log_id)
                    if not os.path.exists(new_file_name):
                        break

                # read file header & check if really an ULog file
                header_len = len(ULog.HEADER_BYTES)
                if (file_obj.get_payload_partial(header_len) !=
                        ULog.HEADER_BYTES):
                    if upload_file_name[-7:].lower() == '.px4log':
                        raise CustomHTTPError(
                            400,
                            'Invalid File. This seems to be a px4log file. '
                            'Upload it to <a href="http://logs.uaventure.com" '
                            'target="_blank">logs.uaventure.com</a>.')
                    raise CustomHTTPError(400, 'Invalid File')

                print('Moving uploaded file to', new_file_name)
                file_obj.move(new_file_name)

                if obfuscated == 1:
                    # TODO: randomize gps data, ...
                    pass

                # generate a token: secure random string (url-safe)
                token = str(binascii.hexlify(os.urandom(16)), 'ascii')

                # Load the ulog file but only if not uploaded via CI.
                # Then we open the DB connection.
                ulog = None
                if source != 'CI':
                    ulog_file_name = get_log_filename(log_id)
                    ulog = load_ulog_file(ulog_file_name)


                # put additional data into a DB
                con = sqlite3.connect(get_db_filename())
                cur = con.cursor()
                cur.execute(
                    'insert into Logs (Id, Title, Description, '
                    'OriginalFilename, Date, AllowForAnalysis, Obfuscated, '
                    'Source, Email, WindSpeed, Rating, Feedback, Type, '
                    'videoUrl, ErrorLabels, Public, Token) values '
                    '(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',
                    [log_id, title, description, upload_file_name,
                     datetime.datetime.now(), allow_for_analysis,
                     obfuscated, source, stored_email, wind_speed, rating,
                     feedback, upload_type, video_url, error_labels, is_public, token])

                if ulog is not None:
                    vehicle_data = update_vehicle_db_entry(cur, ulog, log_id, vehicle_name)
                    vehicle_name = vehicle_data.name

                con.commit()

                url = '/plot_app?log='+log_id
                full_plot_url = get_http_protocol()+'://'+get_domain_name()+url
                print(full_plot_url)

                delete_url = get_http_protocol()+'://'+get_domain_name()+ \
                    '/edit_entry?action=delete&log='+log_id+'&token='+token

                # information for the notification email
                info = {}
                info['description'] = description
                info['feedback'] = feedback
                info['upload_filename'] = upload_file_name
                info['type'] = ''
                info['airframe'] = ''
                info['hardware'] = ''
                info['uuid'] = ''
                info['software'] = ''
                info['rating'] = rating
                if len(vehicle_name) > 0:
                    info['vehicle_name'] = vehicle_name

                if ulog is not None:
                    px4_ulog = PX4ULog(ulog)
                    info['type'] = px4_ulog.get_mav_type()
                    airframe_name_tuple = get_airframe_name(ulog)
                    if airframe_name_tuple is not None:
                        airframe_name, airframe_id = airframe_name_tuple
                        if len(airframe_name) == 0:
                            info['airframe'] = airframe_id
                        else:
                            info['airframe'] = airframe_name
                    sys_hardware = ''
                    if 'ver_hw' in ulog.msg_info_dict:
                        sys_hardware = escape(ulog.msg_info_dict['ver_hw'])
                        info['hardware'] = sys_hardware
                    if 'sys_uuid' in ulog.msg_info_dict and sys_hardware != 'SITL':
                        info['uuid'] = escape(ulog.msg_info_dict['sys_uuid'])
                    branch_info = ''
                    if 'ver_sw_branch' in ulog.msg_info_dict:
                        branch_info = ' (branch: '+ulog.msg_info_dict['ver_sw_branch']+')'
                    if 'ver_sw' in ulog.msg_info_dict:
                        ver_sw = escape(ulog.msg_info_dict['ver_sw'])
                        info['software'] = ver_sw + branch_info


                if upload_type == 'flightreport' and is_public:
                    destinations = set(email_notifications_config['public_flightreport'])
                    if rating in ['unsatisfactory', 'crash_sw_hw', 'crash_pilot']:
                        destinations = destinations | \
                            set(email_notifications_config['public_flightreport_bad'])
                    send_flightreport_email(
                        list(destinations),
                        full_plot_url,
                        DBData.rating_str_static(rating),
                        DBData.wind_speed_str_static(wind_speed), delete_url,
                        stored_email, info)

                    # also generate the additional DB entry
                    # (we may have the log already loaded in 'ulog', however the
                    # lru cache will make it very quick to load it again)
                    generate_db_data_from_log_file(log_id, con)
                    # also generate the preview image
                    IOLoop.instance().add_callback(generate_overview_img_from_id, log_id)

                con.commit()
                cur.close()
                con.close()

                # send notification emails
                send_notification_email(email, full_plot_url, delete_url, info)

                # do not redirect for QGC
                if source != 'QGroundControl':
                    self.redirect(url)

            except CustomHTTPError:
                raise

            except ULogException:
                raise CustomHTTPError(
                    400,
                    'Failed to parse the file. It is most likely corrupt.')
            except Exception:
                print('Error when handling POST data', sys.exc_info()[0],
                      sys.exc_info()[1])
                raise CustomHTTPError(500)

            finally:
                self.multipart_streamer.release_parts()
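
The header check above only compares the first bytes of the upload against the ULog magic. The same validation as a standalone sketch (validate_ulog_header is a hypothetical helper, assuming pyulog's ULog.HEADER_BYTES as used above):

from pyulog import ULog

def validate_ulog_header(file_path):
    """ return True if file_path starts with the ULog magic bytes """
    header_len = len(ULog.HEADER_BYTES)
    with open(file_path, 'rb') as f:
        # reading only the header avoids loading the whole log into memory
        return f.read(header_len) == ULog.HEADER_BYTES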
Example No. 25
    def commit( self, backup=False, startup=False ):
        """
        Move all written but uncommitted data into place.
        Return True on success 
        Return False on error (in which case the caller should rollback())
        
        It is safe to call this method repeatedly until it returns True.
        """

        tmp_db_filename = config.get_db_filename() + ".tmp"
        tmp_snapshot_filename = config.get_snapshots_filename() + ".tmp"
        tmp_lastblock_filename = config.get_lastblock_filename() + ".tmp"
        
        if not os.path.exists( tmp_lastblock_filename ) and (os.path.exists(tmp_db_filename) or os.path.exists(tmp_snapshot_filename)):
            # we did not successfully stage the write.
            # rollback 
            log.error("Partial write detected.  Not committing.")
            return False
            
        # basic sanity checks: don't overwrite the db if the file is zero bytes, or if we can't load it
        if os.path.exists( tmp_db_filename ):
            sb = os.stat( tmp_db_filename )
            if sb.st_size == 0:
                log.error("Partial write detected: tried to overwrite with zero-sized db!  Will rollback.")
                return False

            if startup:
                # make sure the staged snapshots file parses as JSON
                try:
                    with open(tmp_snapshot_filename, "r") as f:
                        db_txt = f.read()

                    db_json = json.loads(db_txt)
                except Exception:
                    log.error("Partial write detected: corrupt partially-committed db!  Will rollback.")
                    return False

        
        backup_time = int(time.time() * 1000000)

        for tmp_filename, filename in zip( [tmp_lastblock_filename, tmp_snapshot_filename, tmp_db_filename], \
                                           [config.get_lastblock_filename(), config.get_snapshots_filename(), config.get_db_filename()] ):
               
            if not os.path.exists( tmp_filename ):
                continue  

            # commit our new lastblock, consensus hash set, and state engine data
            try:
               
               # NOTE: rename fails on Windows if the destination exists 
               if sys.platform == 'win32' and os.path.exists( filename ):
                  
                  try:
                     os.unlink( filename )
                  except:
                     pass

               if not backup:
                   os.rename( tmp_filename, filename )
               else:
                   shutil.copy( tmp_filename, filename )
                   os.rename( tmp_filename, tmp_filename + (".%s" % backup_time))
                  
            except Exception, e:
               
               log.exception(e)
               return False 


def get_readonly_db_state(disposition=DISPOSITION_RO):
    impl = config.get_implementation()
    db_filename = config.get_db_filename(impl=impl)
    db_inst = ZonefileManageDB(db_filename, disposition)
    return db_inst
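
commit() relies on a stage-then-rename discipline: all state is first written to ".tmp" files, verified, and only then renamed into place, so a crash never leaves a half-written live file. A minimal sketch of the pattern for a single file (atomic_write is an illustrative helper, not part of the class above):

import os

def atomic_write(filename, data):
    """ stage data in a temp file, flush it to disk, then rename into place """
    tmp_filename = filename + ".tmp"
    with open(tmp_filename, "w") as f:
        f.write(data)
        f.flush()
        os.fsync(f.fileno())    # make sure the bytes reach the disk first
    os.rename(tmp_filename, filename)   # atomic replacement on POSIX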
Example No. 27
    def get(self, *args, **kwargs):
        search_str = self.get_argument('search[value]', '').lower()
        data_start = int(self.get_argument('start'))
        data_length = int(self.get_argument('length'))
        draw_counter = int(self.get_argument('draw'))

        json_output = dict()
        json_output['draw'] = draw_counter

        # get the logs (but only the public ones)
        con = sqlite3.connect(get_db_filename(),
                              detect_types=sqlite3.PARSE_DECLTYPES)
        cur = con.cursor()

        cur.execute(
            'SELECT Id, Date, Description, WindSpeed, Rating, VideoUrl '
            'FROM Logs WHERE Public = 1 ORDER BY Date DESC')

        def get_columns_from_tuple(db_tuple, counter):
            """ load the columns (list of strings) from a db_tuple
            """
            db_data = DBData()
            log_id = db_tuple[0]
            log_date = db_tuple[1].strftime('%Y-%m-%d')
            db_data.description = db_tuple[2]
            db_data.feedback = ''
            db_data.type = ''
            db_data.wind_speed = db_tuple[3]
            db_data.rating = db_tuple[4]
            db_data.video_url = db_tuple[5]

            db_data_gen = get_generated_db_data_from_log(log_id, con, cur)
            if db_data_gen is None:
                return None

            # bring it into displayable form
            ver_sw = db_data_gen.ver_sw
            if len(ver_sw) > 10:
                ver_sw = ver_sw[:6]
            if len(db_data_gen.ver_sw_release) > 0:
                try:
                    release_split = db_data_gen.ver_sw_release.split()
                    release_type = int(release_split[1])
                    if release_type == 255:  # it's a release
                        ver_sw = release_split[0]
                except (ValueError, IndexError):
                    pass
            airframe_data = get_airframe_data(db_data_gen.sys_autostart_id)
            if airframe_data is None:
                airframe = db_data_gen.sys_autostart_id
            else:
                airframe = airframe_data['name']

            flight_modes = ', '.join([
                flight_modes_table[x][0] for x in db_data_gen.flight_modes
                if x in flight_modes_table
            ])

            m, s = divmod(db_data_gen.duration_s, 60)
            h, m = divmod(m, 60)
            duration_str = '{:d}:{:02d}:{:02d}'.format(h, m, s)

            # make sure to break long descriptions w/o spaces (otherwise they
            # mess up the layout)
            description = html_long_word_force_break(db_data.description)

            return [
                counter,
                '<a href="plot_app?log=' + log_id + '">' + log_date + '</a>',
                description, db_data_gen.mav_type, airframe,
                db_data_gen.sys_hw, ver_sw, duration_str,
                db_data.rating_str(), db_data_gen.num_logged_errors,
                flight_modes
            ]

        # need to fetch all here, because we will do more SQL calls while
        # iterating (having multiple cursors does not seem to work)
        db_tuples = cur.fetchall()
        json_output['recordsTotal'] = len(db_tuples)
        json_output['data'] = []
        if data_length == -1:
            data_length = len(db_tuples)

        filtered_counter = 0
        if search_str == '':
            # speed-up the request by iterating only over the requested items
            counter = len(db_tuples) - data_start + 1
            for i in range(data_start,
                           min(data_start + data_length, len(db_tuples))):
                counter -= 1

                columns = get_columns_from_tuple(db_tuples[i], counter)
                if columns is None:
                    continue

                json_output['data'].append(columns)
            filtered_counter = len(db_tuples)
        else:
            counter = len(db_tuples) + 1
            for db_tuple in db_tuples:
                counter -= 1

                columns = get_columns_from_tuple(db_tuple, counter)
                if columns is None:
                    continue

                if any(
                    [search_str in str(column).lower() for column in columns]):
                    if filtered_counter >= data_start and \
                        filtered_counter < data_start + data_length:
                        json_output['data'].append(columns)
                    filtered_counter += 1

        cur.close()
        con.close()

        json_output['recordsFiltered'] = filtered_counter

        self.set_header('Content-Type', 'application/json')
        self.write(json.dumps(json_output))
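
This handler implements the server side of the DataTables protocol: echo the draw counter, report recordsTotal and recordsFiltered, and return one page of rows. Reduced to its essentials (datatables_page is a hypothetical helper; all_rows stands for the already-rendered row lists):

def datatables_page(all_rows, draw, start, length):
    """ build a DataTables server-side response for one page of rows """
    if length == -1:    # DataTables sends -1 to request all rows
        length = len(all_rows)
    return {
        'draw': draw,
        'recordsTotal': len(all_rows),
        'recordsFiltered': len(all_rows),   # no search filter applied here
        'data': all_rows[start:start + length],
    }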
Example No. 28
    }),
    (r'/overview_img/(.*)', StaticFileHandler, {
        'path': get_overview_img_filepath()
    }),
]


# TODO: DON'T DO THIS
def _move_file_monkeypatch(self, path):
    shutil.copy(self.name, path)


if args.bulkupload:
    folder_path = os.path.abspath(args.bulkupload)
    if os.path.isdir(folder_path):
        con = sqlite3.connect(get_db_filename())
        cur = con.cursor()
        for root, dirs, files in os.walk(folder_path):
            for file_name in files:
                file_path = os.path.join(root, file_name)
                # open in binary mode: ULog files are binary
                with open(file_path, 'rb') as file:
                    # TODO: do actual validation here, don't just check filename
                    _, ext = os.path.splitext(file_name)
                    if ext not in ['.ulg', '.ulog']:
                        print(f'Skipping non-ULog file {file_path}')
                        continue
                    # TODO: PLEASE don't do this, make save_uploaded_log work with real file-like objects
                    file.move = types.MethodType(_move_file_monkeypatch, file)
                    file.get_filename = types.MethodType(
                        lambda self: file_name, file)
                    formdict = {}
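
The TODOs above already name the cleaner approach: instead of monkeypatching move() and get_filename() onto an open file object, wrap the path in a small adapter class that provides the interface the upload path expects. A sketch under that assumption (LocalLogFile is hypothetical; the interface is inferred from the code above):

import shutil

class LocalLogFile:
    """ adapter exposing get_filename()/move() for a log file on disk """

    def __init__(self, file_path, file_name):
        self.name = file_path
        self._file_name = file_name

    def get_filename(self):
        return self._file_name

    def move(self, path):
        # copy rather than move, so the bulk-upload folder stays intact
        shutil.copy(self.name, path)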
Example No. 30
    def get(self, *args, **kwargs):
        """ GET request """
        search_str = self.get_argument('search[value]', '').lower()
        order_ind = int(self.get_argument('order[0][column]'))
        order_dir = self.get_argument('order[0][dir]', '').lower()
        data_start = int(self.get_argument('start'))
        data_length = int(self.get_argument('length'))
        draw_counter = int(self.get_argument('draw'))

        sim = False
        real = False
        if search_str == "sim":
            print("Search string is sim")
            sim = True
        if search_str == "real":
            print("Search string is real")
            real = True

        json_output = dict()
        json_output['draw'] = draw_counter

        # get the logs (but only the public ones)
        con = sqlite3.connect(get_db_filename(),
                              detect_types=sqlite3.PARSE_DECLTYPES)
        cur = con.cursor()

        sql_order = ' ORDER BY Date DESC'

        ordering_col = [
            '',  #table row number
            'Logs.Date',
            '',  #Overview - img
            'Logs.Description',
            'LogsGenerated.MavType',
            '',  #Airframe - not from DB
            'LogsGenerated.Hardware',
            'LogsGenerated.Software',
            'LogsGenerated.Duration',
            'LogsGenerated.StartTime',
            '',  #Rating
            'LogsGenerated.NumLoggedErrors',
            ''  #FlightModes
        ]
        if 0 <= order_ind < len(ordering_col) and ordering_col[order_ind] != '':
            sql_order = ' ORDER BY ' + ordering_col[order_ind]
            if order_dir == 'desc':
                sql_order += ' DESC'

        cur.execute(
            'SELECT Logs.Id, Logs.Date, '
            '       Logs.Description, Logs.WindSpeed, '
            '       Logs.Rating, Logs.VideoUrl, '
            '       LogsGenerated.* '
            'FROM Logs '
            '   LEFT JOIN LogsGenerated on Logs.Id=LogsGenerated.Id '
            # 'WHERE Logs.Public = 1 AND NOT Logs.Source = "CI" '
            # (commented out so that both public and private logs show)
            + sql_order)

        # pylint: disable=invalid-name
        Columns = collections.namedtuple("Columns",
                                         "columns search_only_columns")

        def get_columns_from_tuple(db_tuple, counter):
            """ load the columns (list of strings) from a db_tuple
            """

            db_data = DBDataJoin()
            log_id = db_tuple[0]
            log_date = db_tuple[1].strftime('%Y-%m-%d')
            db_data.description = db_tuple[2]
            db_data.feedback = ''
            db_data.type = ''
            db_data.wind_speed = db_tuple[3]
            db_data.rating = db_tuple[4]
            db_data.video_url = db_tuple[5]
            generateddata_log_id = db_tuple[6]
            if log_id != generateddata_log_id:
                print('Join failed, loading and updating data')
                db_data_gen = get_generated_db_data_from_log(log_id, con, cur)
                if db_data_gen is None:
                    return None
                db_data.add_generated_db_data_from_log(db_data_gen)
            else:
                db_data.duration_s = db_tuple[7]
                db_data.mav_type = db_tuple[8]
                db_data.estimator = db_tuple[9]
                db_data.sys_autostart_id = db_tuple[10]
                db_data.sys_hw = db_tuple[11]
                db_data.ver_sw = db_tuple[12]
                db_data.num_logged_errors = db_tuple[13]
                db_data.num_logged_warnings = db_tuple[14]
                db_data.flight_modes = \
                    {int(x) for x in db_tuple[15].split(',') if len(x) > 0}
                db_data.ver_sw_release = db_tuple[16]
                db_data.vehicle_uuid = db_tuple[17]
                db_data.flight_mode_durations = \
                   [tuple(map(int, x.split(':'))) for x in db_tuple[18].split(',') if len(x) > 0]
                db_data.start_time_utc = db_tuple[19]

            # bring it into displayable form
            ver_sw = db_data.ver_sw
            if len(ver_sw) > 10:
                ver_sw = ver_sw[:6]
            if len(db_data.ver_sw_release) > 0:
                try:
                    release_split = db_data.ver_sw_release.split()
                    release_type = int(release_split[1])
                    if release_type == 255:  # it's a release
                        ver_sw = release_split[0]
                except (ValueError, IndexError):
                    pass
            airframe_data = get_airframe_data(db_data.sys_autostart_id)
            if airframe_data is None:
                airframe = db_data.sys_autostart_id
            else:
                airframe = airframe_data['name']

            flight_modes = ', '.join([
                flight_modes_table[x][0] for x in db_data.flight_modes
                if x in flight_modes_table
            ])

            m, s = divmod(db_data.duration_s, 60)
            h, m = divmod(m, 60)
            duration_str = '{:d}:{:02d}:{:02d}'.format(h, m, s)

            start_time_str = 'N/A'
            if db_data.start_time_utc != 0:
                start_datetime = datetime.fromtimestamp(db_data.start_time_utc)
                start_time_str = start_datetime.strftime("%Y-%m-%d  %H:%M")

            # make sure to break long descriptions w/o spaces (otherwise they
            # mess up the layout)
            description = html_long_word_force_break(db_data.description)

            search_only_columns = []

            if db_data.ver_sw is not None:
                search_only_columns.append(db_data.ver_sw)

            if db_data.ver_sw_release is not None:
                search_only_columns.append(db_data.ver_sw_release)

            if db_data.vehicle_uuid is not None:
                search_only_columns.append(db_data.vehicle_uuid)

            image_col = '<div class="no_map_overview"> Not rendered / No GPS </div>'
            image_filename = os.path.join(get_overview_img_filepath(),
                                          log_id + '.png')
            if os.path.exists(image_filename):
                image_col = '<img class="map_overview" src="/overview_img/'
                image_col += log_id + '.png" alt="Overview Image Load Failed" height=50/>'

            if sim:
                templog_id = log_id + "sim"
            elif real:
                templog_id = log_id + "real"
            else:
                templog_id = log_id

            return Columns([
                counter, '<a href="thiel_app?log=' + templog_id + 'desc:' +
                description + '">' + log_date + '</a>', image_col, description,
                db_data.mav_type, airframe, db_data.sys_hw, ver_sw,
                duration_str, start_time_str,
                db_data.rating_str(), db_data.num_logged_errors, flight_modes
            ], search_only_columns)

        # need to fetch all here, because we will do more SQL calls while
        # iterating (having multiple cursors does not seem to work)
        db_tuples = cur.fetchall()
        json_output['recordsTotal'] = len(db_tuples)
        json_output['data'] = []
        if data_length == -1:
            data_length = len(db_tuples)

        filtered_counter = 0
        if search_str == '':
            # speed-up the request by iterating only over the requested items
            counter = data_start
            for i in range(data_start,
                           min(data_start + data_length, len(db_tuples))):
                counter += 1

                columns = get_columns_from_tuple(db_tuples[i], counter)
                if columns is None:
                    continue

                json_output['data'].append(columns.columns)
            filtered_counter = len(db_tuples)
        else:
            counter = 0
            for db_tuple in db_tuples:
                counter += 1

                columns = get_columns_from_tuple(db_tuple, counter)
                if columns is None:
                    continue

                if any([search_str in str(column).lower() for column in
                        columns.columns + columns.search_only_columns]):
                    if data_start <= filtered_counter < data_start + data_length:
                        json_output['data'].append(columns.columns)
                    filtered_counter += 1

        cur.close()
        con.close()

        json_output['recordsFiltered'] = filtered_counter

        self.set_header('Content-Type', 'application/json')
        self.write(json.dumps(json_output))
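
Note how ordering_col doubles as a whitelist: the client only ever supplies an index into a fixed list of column names, so no user-controlled string reaches the ORDER BY clause. The same pattern as a standalone sketch (build_order_clause is a hypothetical helper):

def build_order_clause(order_index, order_dir, allowed_columns):
    """ map a client-supplied column index to a safe ORDER BY clause """
    default = ' ORDER BY Date DESC'
    if not 0 <= order_index < len(allowed_columns):
        return default
    column = allowed_columns[order_index]
    if column == '':    # empty entries mark non-sortable display columns
        return default
    direction = ' DESC' if order_dir == 'desc' else ''
    return ' ORDER BY ' + column + direction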
Example No. 31
    def __init__(self, plot_config, verbose_output=False):

        self._config = plot_config

        self._verbose_output = verbose_output

        # lists of dates when a _log was uploaded, one list per type
        self._public_logs_dates = []
        self._web_ui_logs_dates = []
        self._qgc_logs_dates = []  # private uploads via QGC
        self._ci_logs_dates = []
        self._all_logs_dates = []

        self._public_logs = []

        # read from the DB
        con = sqlite3.connect(get_db_filename(),
                              detect_types=sqlite3.PARSE_DECLTYPES)
        with con:
            cur = con.cursor()

            cur.execute('select Id, Date, Source, Public, Rating from Logs')

            db_tuples = cur.fetchall()
            for db_tuple in db_tuples:
                log = _Log(db_tuple)

                self._all_logs_dates.append(log.date)
                if log.is_public == 1:
                    self._public_logs_dates.append(log.date)
                else:
                    if log.source == 'CI':
                        self._ci_logs_dates.append(log.date)
                    elif log.source == 'QGroundControl':
                        self._qgc_logs_dates.append(log.date)
                    else:
                        self._web_ui_logs_dates.append(log.date)

                # LogsGenerated: public only
                if log.is_public != 1:
                    continue

                cur.execute('select * from LogsGenerated where Id = ?',
                            [log.log_id])
                db_tuple = cur.fetchone()

                if db_tuple is None:
                    print("Error: no generated data")
                    continue

                log.set_generated(db_tuple)

                # filter bogus entries
                if log.sw_version == 'v0.0.0':
                    if self._verbose_output:
                        print('Warning: %s with version=v0.0.0' % log.log_id)
                    continue

                if log.sw_version == '':
                    # FIXME: does that still occur and if so why?
                    if self._verbose_output:
                        print('Warning: %s version not set' % log.log_id)
                    continue

                if log.autostart_id == 0:
                    print('Warning: %s with autostart_id=0' % log.log_id)
                    continue

                self._public_logs.append(log)

        self._version_data = {}  # dict of _VersionData items
        self._all_airframes = set()
        self._all_boards = set()
        self._all_ratings = set()
        self._all_flight_modes = set()
        self._total_duration = 0  # in hours, public logs only

        for log in self._public_logs:
            if log.sw_version not in self._version_data:
                self._version_data[log.sw_version] = _VersionData()

            self._all_airframes.add(str(log.autostart_id))
            self._all_boards.add(log.hardware)
            self._all_ratings.add(log.rating)

            cur_version_data = self._version_data[log.sw_version]
            boards = cur_version_data.boards
            airframes = cur_version_data.airframes
            airframes_num_logs = cur_version_data.airframes_num_logs
            ratings = cur_version_data.ratings
            flight_modes = cur_version_data.flight_mode_durations

            if log.hardware not in boards:
                boards[log.hardware] = 0
            boards[log.hardware] += log.duration / 3600.

            for flight_mode, duration in log.flight_mode_durations:
                flight_mode_str = str(flight_mode)
                self._all_flight_modes.add(flight_mode_str)
                if flight_mode_str not in flight_modes:
                    flight_modes[flight_mode_str] = 0.
                flight_modes[flight_mode_str] += duration / 3600.

            autostart_str = str(log.autostart_id)
            if autostart_str not in airframes:
                airframes[autostart_str] = 0
                airframes_num_logs[autostart_str] = 0
            airframes[autostart_str] += log.duration / 3600.
            airframes_num_logs[autostart_str] += 1

            if log.rating not in ratings:
                ratings[log.rating] = 0
            ratings[log.rating] += 1

            self._total_duration += log.duration / 3600.
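
The repeated "if key not in dict: dict[key] = 0" initializations above can be collapsed with collections.defaultdict, which creates missing entries on first access. A sketch of the same aggregation in that style (aggregate_hours is illustrative and assumes log objects with the attributes used above):

import collections

def aggregate_hours(logs):
    """ sum flight hours per board and per airframe """
    boards = collections.defaultdict(float)
    airframes = collections.defaultdict(float)
    for log in logs:
        hours = log.duration / 3600.
        boards[log.hardware] += hours       # missing keys start at 0.0
        airframes[str(log.autostart_id)] += hours
    return boards, airframes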
Example No. 32
                try:
                    with open(tmp_snapshot_filename, "r") as f:
                        db_txt = f.read()

                    db_json = json.loads(db_txt)
                except Exception:
                    log.error("Partial write detected: corrupt partially-committed db!  Will rollback.")
                    return False

        
        backup_time = int(time.time() * 1000000)
       
        listing = []
        listing.append( ("lastblock", tmp_lastblock_filename, config.get_lastblock_filename(impl=self.impl)) )
        listing.append( ("snapshots", tmp_snapshot_filename, config.get_snapshots_filename(impl=self.impl)) )
        listing.append( ("db", tmp_db_filename, config.get_db_filename(impl=self.impl)) )

        for file_type, tmp_filename, filename in listing:
            
            dir_path = os.path.dirname( tmp_filename )
            dirfd = None
            try:
                dirfd = os.open(dir_path, os.O_DIRECTORY)
                os.fsync(dirfd)
            except Exception, e:
                log.exception(e)
                log.error("FATAL: failed to sync directory %s" % dir_path)
                traceback.print_stack()
                os.abort()
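
Syncing a directory file descriptor is what makes renames in that directory durable: fsync on the file alone does not persist the directory entry. One common recipe as a POSIX-only Python sketch (durable_rename is illustrative; unlike the fragment above it also closes the descriptor when done):

import os

def durable_rename(tmp_filename, filename):
    """ rename tmp_filename over filename and fsync the parent directory """
    os.rename(tmp_filename, filename)
    dir_path = os.path.dirname(filename) or '.'
    dirfd = os.open(dir_path, os.O_DIRECTORY)
    try:
        os.fsync(dirfd)     # persist the directory entry for the new name
    finally:
        os.close(dirfd)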