Example #1
def main():
    ATM(
        menu,
        Accounts(
            Table(
                File('accounts.txt')
            )
        ),
        History(
            Table(
                File('transactions.txt')
            )
        )
    ).start()
Example #2
def upload():
    if 'file' not in request.files:
        flash('No file part')
        return redirect(request.url)
    f = request.files['file']
    if f.filename == '':
        flash('No selected file')
        return redirect(request.url)
    if f:
        filename = secure_filename(f.filename)
        f.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
        name = request.form['name']
        existing = File.query.filter_by(name=name).first()
        if existing:
            # Update the existing file record
            existing.path = filename
            db.session.commit()
        else:
            # Create a new record
            file = File()
            file.name = name
            file.path = filename
            db.session.add(file)
            db.session.commit()
    return redirect(url_for('manage'))
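This handler presupposes a configured Flask application with an UPLOAD_FOLDER setting and a SQLAlchemy File model exposing name and path columns. A minimal sketch of that assumed setup (all names hypothetical, chosen to mirror the snippet):

import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy

app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = os.path.join(app.root_path, 'uploads')
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///files.db'
db = SQLAlchemy(app)

class File(db.Model):
    # Stores a display name plus the sanitized filename saved on disk.
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(255), unique=True)
    path = db.Column(db.String(255))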
Example #3
def _save_image(db_session, scrape_result):
    """ Save the image returned by Splash to a local file. """
    if scrape_result['error'] is None:
        image_name = '{}.jpg'.format(scrape_result['site']['name'])
        content = base64.decodebytes(scrape_result['image'].encode('utf8'))
        image_file = File(name=image_name,
                          mime='image/jpeg',
                          content=content)
        db_session.add(image_file)

        try:
            db_session.commit()
        except Exception:
            db_session.rollback()
            raise ScrapeException('Could not save image')
    else:
        # Get the generic error image.
        image_file = (
            db_session
            .query(File)
            .filter(File.name == 'hgprofiler_error.png')
            .one()
        )

    return image_file
Example #4
def _save_image(db_session, scrape_result, user_id, censor=False):
    """ Save the image returned by Splash to a local file. """
    if scrape_result['error'] is None and censor is True:
        # Get the generic censored image.
        image_file = (db_session.query(File).filter(
            File.name == _censored_image_name).one())
    elif scrape_result['error'] is None:
        image_name = '{}.jpg'.format(scrape_result['site']['name'].replace(
            ' ', ''))
        content = base64.decodebytes(scrape_result['image'].encode('utf8'))
        image_file = File(name=image_name,
                          mime='image/jpeg',
                          content=content,
                          user_id=user_id)
        db_session.add(image_file)

        try:
            db_session.commit()
        except Exception:
            db_session.rollback()
            raise ScrapeException('Could not save image')
    else:
        # Get the generic error image.
        image_file = (db_session.query(File).filter(
            File.name == _error_image_name).one())

    return image_file
Example #5
def upload_file():
    user = g.user
    file = request.files['file']
    resp = {}
    resp['filename'] = ''
    msg = 'Upload successful'
    if file and allowed_file(file.filename):
        filename = secure_filename(file.filename)
        new_file = str(user.id) + '_' + user.username + '_' + str(
            int(time.time())) + '.' + filename.rsplit('.', 1)[-1]
        basepath = os.path.dirname(__file__)
        if user.role == 1:
            upload_path = os.path.join(basepath,
                                       app.config['UPLOAD_FOLDER_STU'],
                                       new_file)
        else:
            upload_path = os.path.join(basepath,
                                       app.config['UPLOAD_FOLDER_TEC'],
                                       new_file)
        file_url = upload_path
        file.save(upload_path)
        resp['filename'] = file_url.split('/')[-1]
        f = File(user_id=user.id, url=file_url, name=resp['filename'])
        db.session.add(f)
        db.session.commit()
    else:
        msg = 'Filename limitation'
    resp['msg'] = msg
    return get_response(resp)
Example #6
    def get_by_doc_id(cls, did):
        sql = """select * from file where doc_id=?"""
        data = (did, )
        rows = cls.db.query(sql, data)
        records = []
        for row in rows:
            print(row)
            records.append(File(row[0], row[1], row[2], row[3]))
        return records
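The cls.db.query helper here is application-specific. With the standard sqlite3 module, the same parameterized lookup could be sketched as follows (the database path and row layout are assumptions):

import sqlite3

def get_files_by_doc_id(did, db_path='docs.db'):
    # '?' placeholders let sqlite3 bind the value safely (no SQL injection).
    conn = sqlite3.connect(db_path)
    try:
        cur = conn.execute("select * from file where doc_id=?", (did,))
        return cur.fetchall()
    finally:
        conn.close()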
Example #7
    def post(self):
        parser = reqparse.RequestParser()
        args = parser.parse_args()
        uploaded_file = request.files['file']
        filename = str(
            uuid.uuid4()) + '.' + uploaded_file.filename.split('.')[-1]
        print(uploaded_file.filename)
        uploaded_file.save(os.path.join('files', filename))
        file = File(filename)
        session.add(file)
        session.commit()
        return file.id, 201
Example #8
    def file_put(self, _uname, _fname, _obj):
        _query = self.file_query("first", _uname, _fname)
        if _query is None:
            add_row = File()
            add_row.name = u"" + _fname
            add_row.pull_id = self.pull_add(_obj)
            add_row.user_id = self.user_query("one", _uname).id
            self.__session.add(add_row)
            self.__session.commit()
            return True
        else:
            return False
Example #9
def insertFile(fname, filePerm, size, uId, pFolderId):
    try:
        newFile = File(fileName=fname,
                       filePerm=filePerm,
                       size=size,
                       uId=uId,
                       pFolderId=pFolderId)
        db.session.add(newFile)
        db.session.commit()
        return newFile
    except SQLAlchemyError as e:
        print(e)
        db.session.rollback()
        return False
Example #10
    def __submit(self, filename) -> None:
        try:
            Validation.is_file(filename)
            Validation.has_extension(filename)
        except ValidationException.MissingExtensionError:
            FileEventHandler.__LOG.debug(
                f"[SKIPPING] file '{filename}': no extension")
            return
        except FileNotFoundError:
            FileEventHandler.__LOG.debug(
                f"[SKIPPING] file '{filename}': no regular file")
            return

        self.__executor.submit(self.__dispatcher.execute, File(filename))
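The __executor and __dispatcher collaborators are not shown; a minimal sketch of how they might be wired with the standard concurrent.futures pool (the Dispatcher class is hypothetical):

from concurrent.futures import ThreadPoolExecutor

class Dispatcher:
    def execute(self, file):
        # Placeholder for whatever per-file work the real dispatcher does.
        print('processing', file)

executor = ThreadPoolExecutor(max_workers=4)
future = executor.submit(Dispatcher().execute, 'example.txt')
future.result()   # block until the task finishes
executor.shutdown()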
Example #11
    def show(self):
        while True:
            window = self.window
            event, values = window.read()
            if event == "submit":
                file = File(None, values['name'], values['path'], self.doc.id)
                fn = self.make_file_on_disk(self.person, self.doc, file)
                if fn:
                    file.path = fn
                self.service.add_file(file)
                break
            elif event == "reset":
                window.Element("name").Update(value="")
                window.Element("doc").Update(value="")
            else:
                break
        window.Close()
Example #12
def adding(upload, filename, datetime):
    user = user_operations.get_user_for_model()
    current_dir = obj_of_current_dir()
    id = user.key.id() + getting_path(filename, current_dir)
    key = ndb.Key("File", id)

    if nocontain(key, current_dir.files):
        object = File(id=id)
        object.name = filename
        object.date = datetime
        object.blob = upload.key()
        object.put()
        current_dir.files.append(key)
        current_dir.put()
        return "file added!"
    else:
        blobstore.delete(upload.key())
        return "A file with this name already exists in this directory!"
Example #13
def create_zip(filename, results, user_id):
    '''
    Generate zip archive of results and return the file id.

    Adds screenshots and HTML for found results.
    Adds csv result summary.
    '''

    db_session = worker.get_session()
    files = []
    str_files = []

    # Get images and HTML
    for result in results:
        if result.status == 'f':
            # Add the image file
            files.append((result.image_file.name, result.image_file.relpath()))
            # Add the HTML as a string file
            html_filename = '{}.html'.format(result.site_name.replace(' ', ''))
            html_file = (html_filename, result.html)
            str_files.append(html_file)

    # Generate in-memory results csv
    csv_string = results_csv_string(results)
    str_file = ('{}.csv'.format(filename), csv_string)
    str_files.append(str_file)

    zip_file = File(name='{}.zip'.format(filename),
                    mime='application/zip',
                    zip_archive=True,
                    zip_files=files,
                    zip_str_files=str_files,
                    user_id=user_id)

    db_session.add(zip_file)

    try:
        db_session.commit()
    except Exception as e:
        raise ArchiveException(e)

    return zip_file.id
Example #14
    def __do_fetch(self, directory: str):
        for file in sorted(os.listdir(directory)):
            if file in self.__excluded:
                continue

            file_path = os.path.join(directory, file)
            if os.path.isdir(file_path):
                results = list(self.__do_fetch(file_path))
            else:
                results = [file_path]

            # Return the files
            for fp in results:
                if isinstance(fp, File):
                    yield fp
                    continue

                yield File(fp)
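The recursive generator above can also be written without recursion using the standard library's os.walk; a sketch under the same assumptions (File is the wrapper class used throughout these examples):

import os

def fetch(directory, excluded=frozenset()):
    for root, dirs, files in os.walk(directory):
        # Prune excluded directories in place so os.walk skips them entirely.
        dirs[:] = sorted(d for d in dirs if d not in excluded)
        for name in sorted(files):
            if name not in excluded:
                yield File(os.path.join(root, name))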
Example #15
    def file_add(self, _name, _prog, _ver, _arch, _job=constant.init["job"]):
        # test version for file
        try:
            _vquery = self.ver_query("one", _prog, _ver)
        except Exception:
            return False
        else:
            # find file
            _query = self.file_query("all", _name, _prog, _ver, False, _arch)
            if not _query:
                ex = _name.split(".")[-1]
                _type = constant.ftype[ex]
                add_row = File()
                add_row.name = u"" + _name
                add_row.ver_id = _vquery.id
                add_row.const_type = u"" + _type
                add_row.const_arch = u"" + _arch
                add_row.job_id = self.job_query("one", _job).id
                self.__session.add(add_row)
                self.__session.commit()
            return True
Example #16
def list_or_retr(path):
    conn = get_conn()
    path = FTPClientConnection.to_full_path(path)
    # Do list if the arg indicates directory:
    if path[-1] == '/':
        conn.cwd(path)
        data = conn.list().split('\r\n')
        file_list = []
        for line in data:
            if not line:
                continue
            print(line)
            f = File(line)
            file_list.append(f.dict)
        return return_json(file_list)
    else:
        directory, file = FTPClientConnection.split_dir(path)
        print(directory)
        conn.cwd(directory)
        return Response(conn.direct_retr(file),
                        mimetype='application/octet-stream')
Example #17
    def _create_fixture_images(self, config):
        '''
        Create the generic error image.

        Since this script will often run as root, it modifies the owner of the
        new file to match the owner of the data directory.
        '''

        session = app.database.get_session(self._db)

        image_name = 'hgprofiler_error.png'
        data_stat = os.stat(get_path('data'))
        img_path = os.path.join(get_path('static'), 'img', image_name)
        with open(img_path, 'rb') as img:
            img_data = img.read()
            image_file = File(name=image_name,
                              mime='image/png',
                              content=img_data)
            image_file.chown(data_stat.st_uid, data_stat.st_gid)
            session.add(image_file)

        session.commit()
Example #18
    def create_object(self, obj):
        file = request.files['file']
        file.stream.seek(0, os.SEEK_END)
        fsize = file.tell()
        if fsize == 0:
            raise BadRequest("Uploaded file is empty")

        sha256 = calc_hash(file.stream, hashlib.sha256(),
                           lambda h: h.hexdigest())

        file.stream.seek(0, os.SEEK_SET)
        fmagic = magic.from_buffer(file.stream.read())

        # Create file first so we can add it without worrying about race conditions thanks to get_or_create
        db_file = File()
        db_file.file_name = secure_filename(request.files['file'].filename)
        db_file.file_size = fsize
        db_file.file_type = fmagic
        db_file.parents = []
        db_file.crc32 = crc32_sum(file.stream)
        db_file.md5 = calc_hash(file.stream, hashlib.md5(),
                                lambda h: h.hexdigest())
        db_file.sha1 = calc_hash(file.stream, hashlib.sha1(),
                                 lambda h: h.hexdigest())
        db_file.sha256 = sha256
        db_file.dhash = sha256
        db_file.sha512 = calc_hash(file.stream, hashlib.sha512(),
                                   lambda h: h.hexdigest())
        db_file.humanhash = Humanhash._humanhash(sha256)
        db_file.ssdeep = calc_hash(file.stream, ssdeep.Hash(),
                                   lambda h: h.digest())
        db_file.upload_time = datetime.now()

        if (app_config.malwarecage.enable_maintenance
                and g.auth_user.login == app_config.malwarecage.admin_login):
            db_file.upload_time = obj.data.get("upload_time", datetime.now())

        db_file, is_file_new = File.get_or_create(db_file, file)

        return db_file, is_file_new
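calc_hash is a project helper whose implementation is not shown here. A plausible version consistent with the calls above (an assumption, not the project's actual code) rewinds the stream, feeds it to the digest object in chunks, and hands the finished object to the formatting callback:

import os

def calc_hash(stream, hash_obj, fmt):
    # Rewind so every digest sees the file from the beginning.
    stream.seek(0, os.SEEK_SET)
    for chunk in iter(lambda: stream.read(64 * 1024), b''):
        hash_obj.update(chunk)
    return fmt(hash_obj)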
Example #19
def create_zip(filename, results):
    '''
    Generate zip archive of results and return the file id.

    Adds all images for results that have screenshots.
    Adds csv result summary created on the fly (as StringIO).
    '''

    db_session = worker.get_session()
    files = []
    str_files = []

    # Create list of images
    for result in results:
        # Add the name to results for the csv output
        files.append((result.image_file.name, result.image_file.relpath()))

    # Generate in-memory results csv
    csv_string = results_csv_string(results)
    str_file = ('{}.csv'.format(filename), csv_string)
    str_files.append(str_file)

    zip_file = File(name='{}.zip'.format(filename),
                    mime='application/zip',
                    zip_archive=True,
                    zip_files=files,
                    zip_str_files=str_files)

    db_session.add(zip_file)

    try:
        db_session.commit()
    except Exception as e:
        raise ArchiveException(e)

    return zip_file.id
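The zip_archive/zip_files/zip_str_files keywords belong to this project's File model. With only the standard library, the same archive could be assembled in memory like this (a sketch; the model presumably does something similar internally):

import io
import zipfile

def build_zip(files, str_files):
    # files: (archive name, path on disk); str_files: (archive name, content).
    buf = io.BytesIO()
    with zipfile.ZipFile(buf, 'w', zipfile.ZIP_DEFLATED) as zf:
        for name, path in files:
            zf.write(path, arcname=name)
        for name, text in str_files:
            zf.writestr(name, text)
    return buf.getvalue()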
Example #20
def scrape_instagram_posts(id_, recent):
    """
    Fetch instagram posts for the user identified by id_.
    Checks posts already stored in db, and will only fetch older or newer
    posts depending on value of the boolean argument 'recent',
    e.g. recent=True will return recent posts not already stored in the db.
    The number of posts to fetch is configured in the Admin.
    """
    redis = worker.get_redis()
    db = worker.get_session()
    author = db.query(Profile).filter(Profile.id == id_).first()
    proxies = _get_proxies(db)
    max_results = get_config(db, 'max_posts_instagram', required=True).value
    try:
        max_results = int(max_results)
    except (TypeError, ValueError):
        raise ScrapeException('Value of max_posts_instagram must be an integer')

    min_id = None
    results = 0
    params = {}

    if author is None:
        raise ValueError('No profile exists with id={}'.format(id_))

    url = 'https://api.instagram.com/v1/users/{}/media/recent' \
          .format(author.upstream_id)

    # Get last post currently stored in db for this profile.
    post_query = db.query(Post) \
        .filter(Post.author_id == id_) \
        .order_by(Post.upstream_created.desc())

    if post_query.count() > 0:
        # Only fetch posts newer than those already stored in db
        if recent:
            min_id = post_query[0].upstream_id
            params['min_id'] = str(min_id)
        # Only fetch posts older than those already stored in db
        else:
            max_id = post_query[post_query.count() - 1].upstream_id
            params['max_id'] = str(max_id)

    worker.start_job(total=max_results)
    while results < max_results:
        response = requests.get(
            url,
            params=params,
            proxies=proxies,
            verify=False
        )

        response.raise_for_status()
        post_ids = list()
        response_json = response.json()['data']
        pagination = response.json()['pagination']

        # Instagram API result includes post with min_id so remove it
        response_json[:] = [d for d in response_json if d.get('id') != min_id]

        for gram in response_json:
            if gram['caption'] is not None:
                text = gram['caption']['text']
            else:
                text = None

            post = Post(
                author,
                gram['id'],
                datetime.fromtimestamp(int(gram['created_time'])),
                text
            )

            if gram['location'] is not None:
                if 'latitude' in gram['location']:
                    post.latitude = gram['location']['latitude']
                    post.longitude = gram['location']['longitude']

                if 'name' in gram['location']:
                    post.location = gram['location']['name']

                    if 'street_address' in gram['location']:
                        post.location += ' ' + gram['location']['street_address']

            if 'images' in gram:
                image_url = gram['images']['standard_resolution']['url']
                name = os.path.basename(urlparse(image_url).path)
                img_response = requests.get(image_url, verify=False)
                mime = img_response.headers['Content-type']
                image = img_response.content
                post.attachments.append(File(name, mime, image))

            db.add(post)
            db.flush()
            post_ids.append(post.id)
            worker.update_job(current=results)
            results += 1
            if results == max_results:
                break

        # If there are more results, set the max_id param, otherwise finish
        if 'next_max_id' in pagination:
            params['max_id'] = pagination['next_max_id']
        else:
            break

    db.commit()
    worker.finish_job()
    redis.publish('profile_posts', json.dumps({'id': id_}))
    app.queue.schedule_index_posts(post_ids)
Example #21
def upload_file(billId):
    start = time.time()
    bill_id = billId
    username = request.authorization.username
    passwordinfo = request.authorization.password
    bill_sc = Billschema(many=False)
    data1 = request.get_json()
    dbtime = time.time()
    flag = checkauthentication(username, passwordinfo)
    dur = (time.time() - dbtime) * 1000
    c.timing("dbconnect", dur)

    if flag:  # check if the user exists
        result = Credential.select_user_by_email(username)
        user_sc = Credentialschema()

        data = user_sc.dump(result)
        owner_id = data.get('id')

        dbtime = time.time()
        result2 = Bills.select_user_by_billid(bill_id)

        dur = (time.time() - dbtime) * 1000
        c.timing("dbconnect", dur)
        bill_sc = Billschema(many=False)

        data2 = bill_sc.dump((result2))

        owner_id2 = data2.get('owner_id')

        if owner_id == owner_id2:  #authorized against bill and user
            # Check that the request actually has a file part before using it.
            if 'file' not in request.files:
                return custom_http_code('No file part in the request', 400)
            file = request.files['file']
            if file.filename == '':
                return custom_http_code('No file selected', 400)
            if file and allowed_file(file.filename):
                result = File.select_file_by_billid(bill_id)
                print(result)
                if result:
                    return custom_http_code(
                        "file already exists with bill delete first", 400)
                filename = secure_filename(file.filename)
                id = str(uuid.uuid4().hex)
                dir = "attachments" + "/" + id
                target = os.path.join(root_dir, dir)
                print(target)
                if not os.path.isdir(target):
                    os.mkdir(target)
                else:
                    return custom_http_code("file already exists", 400)
                destination_folder = "/".join([target, filename])
                # Measure size and hash the content, rewinding the stream
                # between passes so each read sees the whole file.
                file.seek(0, os.SEEK_END)
                file_len = file.tell()
                file.seek(0)
                img_key = hashlib.md5(file.read()).hexdigest()
                file.seek(0)
                file.save(destination_folder)
                object_name = id + "/" + filename
                s3_client = boto3.client('s3')
                # Rewind again so the S3 upload reads the full stream.
                file.seek(0)

                dbtime = time.time()
                s3_client.upload_fileobj(file, bucket, object_name)

                dur = (time.time() - dbtime) * 1000
                c.timing("s3time", dur)

                url = 'https://s3.console.aws.amazon.com/' + bucket + "/attachments/" + id + "/" + filename
                upload_date = datetime.datetime.today().strftime('%Y-%m-%d')

                dbtime = time.time()
                new_bill = File(id, bill_id, filename, upload_date, url,
                                file_len, img_key)
                db.create_all()
                db.session.add(new_bill)
                db.session.commit()

                dur = (time.time() - dbtime) * 1000
                c.timing("dbconnect", dur)
                file_sc = File_schema_output(many=False)
                result = File.select_file_by_file_id(id)
                print(result)
                data = file_sc.dump(result)
                print(data)

                c.incr("postfilecount")
                dur = (time.time() - start) * 1000
                c.timing("postfilecount", dur)
                return custom_http_code(data, 201)

            else:

                c.incr("postfilecount")
                dur = (time.time() - start) * 1000
                c.timing("postfilecount", dur)
                return custom_http_code('wrong file extension', 400)
        else:

            c.incr("postfilecount")
            dur = (time.time() - start) * 1000
            c.timing("postfilecount", dur)
            return custom_http_code('Unauthorised', 401)

    else:
        return custom_http_code('invalid login', 401)
Example #22
    def _create_sample_profiles(self, config):
        ''' Create some sample profiles. '''

        session = app.database.get_session(self._db)
        sample_dir = os.path.join(os.path.dirname(__file__), 'sample-data')

        # Maurice Moss
        moss_twitter = Profile(site='twitter',
                               upstream_id='12345',
                               username=ProfileUsername(
                                   'maurice.moss', start_date='2014-04-01'))

        moss_twitter.usernames.append(
            ProfileUsername('maurice',
                            start_date='2013-06-01',
                            end_date='2014-03-31'))

        moss_twitter.usernames.append(
            ProfileUsername('maurie',
                            start_date='2013-02-15',
                            end_date='2013-05-30'))

        Post(author=moss_twitter,
             content='Going to the grocery store.',
             upstream_id='1234',
             upstream_created='2015-02-04 12:34:50')

        post = Post(author=moss_twitter,
                    content='Love this band!.',
                    upstream_id='2345',
                    upstream_created='2015-03-01')

        post.attachments.append(
            File(name='helloworld.txt',
                 mime='text/plain',
                 content='Hello world!\n\n'.encode('utf8')))

        moss_twitter.posts.append(post)

        with open(os.path.join(sample_dir, 'moss.jpg'), 'rb') as moss_jpg:
            moss_twitter.avatars.append(
                Avatar(url='http://foobar.com/moss-avatar.jpg',
                       mime='image/jpeg',
                       image=moss_jpg.read()))

        moss_twitter.description = "I do IT at Reynholm Industries."
        moss_twitter.post_count = 1205
        moss_twitter.friend_count = 1
        moss_twitter.follower_count = 3
        moss_twitter.join_date = dateutil.parser.parse('2013-06-01')
        moss_twitter.join_date_is_exact = False

        session.add(moss_twitter)

        # Jen Barber
        jen_twitter = Profile(site='twitter',
                              upstream_id='23456',
                              username=ProfileUsername(
                                  'jen.barber', start_date='2013-11-12'))

        jen_twitter.usernames.append(
            ProfileUsername('jenb',
                            start_date='2013-06-14',
                            end_date='2013-11-12'))

        jen_twitter.usernames.append(
            ProfileUsername('jenny',
                            start_date='2013-03-15',
                            end_date='2013-06-14'))

        with open(os.path.join(sample_dir, 'jen.jpg'), 'rb') as jen_jpg:
            jen_twitter.avatars.append(
                Avatar(url='http://foobar.com/jen-avatar.jpg',
                       mime='image/jpeg',
                       image=jen_jpg.read()))

        jen_twitter.description = "Relationship Manager for the IT department."
        jen_twitter.post_count = 1543
        jen_twitter.friend_count = 1
        jen_twitter.follower_count = 1
        jen_twitter.join_date = dateutil.parser.parse('2013-03-15')
        jen_twitter.join_date_is_exact = True

        moss_twitter.followers.append(jen_twitter)

        session.add(jen_twitter)

        # A couple of randos.
        moss_twitter.followers.append(
            Profile(site='twitter', upstream_id='345678', username='******'))

        moss_twitter.followers.append(
            Profile(site='twitter', upstream_id='456789', username='******'))

        jen_twitter.followers.append(
            Profile(site='twitter', upstream_id='567890', username='******'))

        session.commit()
Example #23
def calculate_sums(session, message, tmpdir):
    """ Extract the content of the file extracted from the fedmsg message
    and browse the sources of the specified package and for each of the
    files in the sources get their sha256sum, sha1sum, and md5sum.
    """

    local_filename = os.path.join(tmpdir, message['filename'])

    if not os.path.exists(local_filename):
        raise IOError('File %s not found' % local_filename)

    if local_filename.endswith('.gem'):
        cmd = ['rpmdev-extract', '-C', tmpdir, local_filename]
        proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
        proc.communicate()
        # Remove not-used files
        os.unlink(os.path.join(tmpdir, 'metadata.gz'))
        os.unlink(os.path.join(tmpdir, 'checksums.yaml.gz'))
        # Remove original sources - we only keep the data archive
        os.unlink(local_filename)
        local_filename = os.path.join(tmpdir, 'data.tar.gz')

    if zipfile.is_zipfile(local_filename):
        if local_filename.endswith('.jar') or local_filename.endswith('.war'):
            log.warning('Invalid sources uploaded: %r - package: %r' %
                        (local_filename, message.get('name')))
            return {'invalid': local_filename}

    cmd = ['rpmdev-extract', '-C', tmpdir, local_filename]
    proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
    output = proc.communicate()[0]
    if proc.returncode:
        raise IOError('Something went wrong when extracting %s' %
                      local_filename)

    filename = output.split('\n')
    # output from zip archives
    if 'Archive:' in filename[0] and 'creating:' in filename[1]:
        filename = filename[1].split('creating:')[1].strip()
    else:
        filename = filename[0]

    if filename and '/' in filename:
        filename = filename.split('/')[0]
        filename = os.path.join(tmpdir, filename)
    else:
        log.warning("No folder extracted from %r" % local_filename)
        filename = tmpdir

    if local_filename and os.path.exists(local_filename):
        os.unlink(local_filename)

    count, stored = 0, 0
    for fname, sha256sum, sha1sum, md5sum in walk_directory(filename):
        count = count + 1
        pkgobj = File.exists(session, message['md5sum'], fname)
        fname = fname.replace(tmpdir, '')
        if not pkgobj:
            pkgobj = File(pkg_name=message['name'],
                          filename=fname,
                          sha256sum=sha256sum,
                          sha1sum=sha1sum,
                          md5sum=md5sum,
                          tar_file=message['filename'],
                          tar_sum=message['md5sum'])
            session.add(pkgobj)
            stored = stored + 1
    session.commit()

    log.info("Stored %i of %i files" % (stored, count))
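walk_directory is not shown; a plausible helper matching the loop above (an assumption) yields the path plus sha256, sha1, and md5 hex digests for every regular file under the extracted tree:

import hashlib
import os

def walk_directory(directory):
    for root, _dirs, files in os.walk(directory):
        for name in files:
            path = os.path.join(root, name)
            sha256, sha1, md5 = hashlib.sha256(), hashlib.sha1(), hashlib.md5()
            with open(path, 'rb') as fh:
                # One pass over the file feeds all three digests.
                for chunk in iter(lambda: fh.read(64 * 1024), b''):
                    sha256.update(chunk)
                    sha1.update(chunk)
                    md5.update(chunk)
            yield path, sha256.hexdigest(), sha1.hexdigest(), md5.hexdigest()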