Code example #1
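get_file streams a single file observable back out of a stored analysis, resolving the second argument either as an observable UUID or as a file name.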
def get_file(uuid, file_uuid_or_name):
    storage_dir = storage_dir_from_uuid(uuid)
    if saq.CONFIG['service_engine']['work_dir'] and not os.path.isdir(storage_dir):
        storage_dir = workload_storage_dir(uuid)

    root = RootAnalysis(storage_dir=storage_dir)
    root.load()

    # is this a UUID?
    try:
        validate_uuid(file_uuid_or_name)
        file_observable = root.get_observable(file_uuid_or_name)
        if file_observable is None:
            abort(Response("invalid file_uuid {}".format(file_uuid_or_name), 400))

    except ValueError:
        file_observable = root.find_observable(lambda o: o.type == F_FILE and o.value == file_uuid_or_name)
        if file_observable is None:
            abort(Response("invalid file name {}".format(file_uuid_or_name), 400))
        

    # NOTE we use an absolute path here because if we don't then
    # send_from_directory makes it relative to the app root path
    # which is (/opt/ace/aceapi)

    target_path = os.path.join(saq.SAQ_HOME, root.storage_dir, file_observable.value)
    if not os.path.exists(target_path):
        abort(Response("file path {} does not exist".format(target_path), 400))

    # XXX revisit how we save (name) files
    return send_from_directory(os.path.dirname(target_path), 
                               os.path.basename(target_path), 
                               as_attachment=True,
                               attachment_filename=os.path.basename(target_path).encode().decode('latin-1', errors='ignore'))
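
validate_uuid itself is not shown in these excerpts. From the way the handlers use it (get_file branches on the ValueError, get_status turns one into a 400), a minimal sketch, assuming it simply defers to the standard library:

import uuid as uuidlib

def validate_uuid(value):
    # raise ValueError on anything that is not a well-formed UUID string;
    # the handlers in these examples rely on exactly that contract
    uuidlib.UUID(value)
    return True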
Code example #2
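clear removes the storage directory for an analysis, but only when the caller holds the matching lock on its uuid.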
def clear(uuid, lock_uuid, db, c):

    validate_uuid(uuid)
    validate_uuid(lock_uuid)

    # make sure this uuid is locked with the given lock_uuid
    # this is less a security feature than it is a mistake-blocker :-)
    c.execute("SELECT uuid FROM locks WHERE uuid = %s AND lock_uuid = %s", (uuid, lock_uuid))
    row = c.fetchone()
    if row is None:
        logging.warning("request to clear uuid {} with invalid lock uuid {}".format(uuid, lock_uuid))
        abort(Response("nope", 400))

    target_dir = storage_dir_from_uuid(uuid)
    if saq.CONFIG['engine']['work_dir'] and not os.path.isdir(target_dir):
        target_dir = workload_storage_dir(uuid)

    if not os.path.isdir(target_dir):
        logging.error("request to clear unknown target {}".format(target_dir))
        abort(Response("unknown target {}".format(target_dir)))

    logging.info("received request to clear {} from {}".format(uuid, request.remote_addr))
    
    try:
        shutil.rmtree(target_dir)
    except Exception as e:
        logging.error("unable to clear {}: {}".format(target_dir, e))
        report_exception()
        abort(Response("clear failed"))

    # looks like it worked
    return json_result({'result': True})
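
json_result is another shared helper that does not appear in these excerpts. A plausible minimal version, assuming it just wraps a JSON body in a Flask Response (get_status below would additionally need its datetime columns serialized, hence default=str):

import json
from flask import Response

def json_result(data):
    # default=str keeps datetime values (insert_date, lock_time, ...) serializable
    return Response(json.dumps(data, default=str), mimetype='application/json')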
Code example #3
File: __init__.py Project: krayzpipes/ACE
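download packs an analysis storage directory into a tar archive and streams it back to the caller.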
def download(uuid):

    validate_uuid(uuid)

    target_dir = storage_dir_from_uuid(uuid)
    if not os.path.isdir(target_dir):
        logging.error(
            "request to download unknown target {}".format(target_dir))
        abort(make_response("unknown target {}".format(target_dir), 400))

    logging.info("received request to download {} to {}".format(
        uuid, request.remote_addr))

    # create the tar file we're going to send back
    fp, path = tempfile.mkstemp(prefix="download_{}".format(uuid),
                                suffix='.tar',
                                dir=saq.TEMP_DIR)

    try:
        # keep a handle on the wrapped descriptor so buffered tar data can be
        # flushed to disk before we seek back and stream the file out
        tar_fp = os.fdopen(fp, 'wb')
        tar = tarfile.open(fileobj=tar_fp, mode='w|')
        tar.add(target_dir, '.')
        tar.close()
        tar_fp.flush()

        os.lseek(fp, 0, os.SEEK_SET)

        def _iter_send():
            while True:
                data = os.read(fp, io.DEFAULT_BUFFER_SIZE)
                if data == b'':
                    # end the generator with return; raising StopIteration
                    # inside a generator is a RuntimeError under PEP 479
                    return
                yield data

        return Response(_iter_send(), mimetype='application/octet-stream')

    finally:
        try:
            os.remove(path)
        except:
            pass
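
On the consuming side, a caller would stream the response into a buffer and unpack it with tarfile. A sketch, with a hypothetical /download/<uuid> route since only the view function appears above:

import io
import tarfile
import requests

def fetch_analysis(base_url, uuid, local_dir):
    # the route is an assumption; only download() itself is shown above
    resp = requests.get("{}/download/{}".format(base_url, uuid), stream=True)
    resp.raise_for_status()
    buf = io.BytesIO()
    for chunk in resp.iter_content(io.DEFAULT_BUFFER_SIZE):
        buf.write(chunk)
    buf.seek(0)
    # the endpoint streams a tar archive; unpack it locally
    with tarfile.open(fileobj=buf, mode='r|') as tar:
        tar.extractall(path=local_dir)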
Code example #4
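get_status reports everything the node knows about a uuid: its workload entry, its alert row, any scheduled delayed analysis, and any lock held on it.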
def get_status(uuid):

    try:
        validate_uuid(uuid)
    except ValueError as e:
        abort(Response(str(e), 400))

    storage_dir = storage_dir_from_uuid(uuid)
    if saq.CONFIG['engine']['work_dir'] and not os.path.isdir(storage_dir):
        storage_dir = workload_storage_dir(uuid)

    if not os.path.exists(storage_dir):
        abort(Response("invalid uuid {}".format(uuid), 400))

    result = {
        'workload': None,
        'delayed_analysis': [],
        'locks': None,
        'alert': None
    }

    with get_db_connection() as db:
        c = db.cursor()

        # is this still in the workload?
        c.execute(
            """
SELECT 
    id, 
    uuid, 
    node_id, 
    analysis_mode, 
    insert_date
FROM
    workload
WHERE
    uuid = %s
""", (uuid, ))
        row = c.fetchone()
        if row is not None:
            result['workload'] = {
                'id': row[0],
                'uuid': row[1],
                'node_id': row[2],
                'analysis_mode': row[3],
                'insert_date': row[4]
            }

        # is this an alert?
        c.execute(
            """
SELECT 
    id, 
    uuid,
    location,
    insert_date,
    storage_dir,
    disposition,
    disposition_time,
    detection_count
FROM
    alerts
WHERE
    uuid = %s
""", (uuid, ))
        row = c.fetchone()
        if row is not None:
            result['alert'] = {
                'id': row[0],
                'uuid': row[1],
                'location': row[2],
                'insert_date': row[3],
                'storage_dir': row[4],
                'disposition': row[5],
                'disposition_time': row[6],
                'detection_count': row[7]
            }

        # is there any delayed analysis scheduled for it?
        c.execute(
            """
SELECT
    id,
    uuid,
    observable_uuid,
    analysis_module,
    insert_date,
    delayed_until,
    node_id
FROM
    delayed_analysis
WHERE
    uuid = %s
ORDER BY
    delayed_until
""", (uuid, ))
        for row in c:
            result['delayed_analysis'].append({
                'id': row[0],
                'uuid': row[1],
                'observable_uuid': row[2],
                'analysis_module': row[3],
                'insert_date': row[4],
                'delayed_until': row[5],
                'node_id': row[6]
            })

        # are there any locks on it?
        c.execute(
            """
SELECT
    uuid,
    lock_uuid,
    lock_time,
    lock_owner
FROM
    locks
WHERE
    uuid = %s
""", (uuid, ))
        row = c.fetchone()
        if row is not None:
            result['locks'] = {
                'uuid': row[0],
                'lock_uuid': row[1],
                'lock_time': row[2],
                'lock_owner': row[3]
            }

    return json_result({'result': result})
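
For reference, a successful call returns the four sub-structures under a result key; a uuid that is only sitting in the workload might produce something like this (values illustrative):

{
    "result": {
        "workload": {
            "id": 312,
            "uuid": "11111111-2222-3333-4444-555555555555",
            "node_id": 1,
            "analysis_mode": "analysis",
            "insert_date": "2019-01-01 00:00:00"
        },
        "delayed_analysis": [],
        "locks": null,
        "alert": null
    }
}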
Code example #5
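upload accepts a tar archive of an analysis, extracts it into this node's storage directory, and optionally (the sync modifier) schedules it for processing.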
def upload(uuid):
    
    validate_uuid(uuid)

    if KEY_UPLOAD_MODIFIERS not in request.values:
        abort(Response("missing key {} in request".format(KEY_UPLOAD_MODIFIERS), 400))

    if KEY_ARCHIVE not in request.files:
        abort(Response("missing files key {}".format(KEY_ARCHIVE), 400))

    upload_modifiers = json.loads(request.values[KEY_UPLOAD_MODIFIERS])
    if not isinstance(upload_modifiers, dict):
        abort(Response("{} should be a dict".format(KEY_UPLOAD_MODIFIERS), 400))

    overwrite = False
    if KEY_OVERWRITE in upload_modifiers:
        overwrite = upload_modifiers[KEY_OVERWRITE]
        if not isinstance(overwrite, bool):
            abort(Response("{} should be a boolean".format(KEY_OVERWRITE), 400))

    sync = False
    if KEY_SYNC in upload_modifiers:
        sync = upload_modifiers[KEY_SYNC]
        if not isinstance(sync, bool):
            abort(Response("{} should be a boolean".format(KEY_SYNC), 400))

    logging.info("requested upload for {}".format(uuid))

    # does the target directory already exist?
    target_dir = storage_dir_from_uuid(uuid)
    if os.path.exists(target_dir):
        # are we over-writing it?
        if not overwrite:
            abort(Response("{} already exists (specify overwrite modifier to replace the data)".format(target_dir), 400))

        # if we are overwriting the entry then we need to completely clear
        # the existing directory contents first
        # TODO implement this

    try:
        os.makedirs(target_dir, exist_ok=True)
    except Exception as e:
        logging.error("unable to create directory {}: {}".format(target_dir, e))
        report_exception()
        abort(Response("unable to create directory {}: {}".format(target_dir, e), 400))

    logging.debug("target directory for {} is {}".format(uuid, target_dir))

    # save the tar file so we can extract it
    fp, tar_path = tempfile.mkstemp(suffix='.tar', prefix='upload_{}'.format(uuid), dir=saq.TEMP_DIR)
    os.close(fp)

    try:
        request.files[KEY_ARCHIVE].save(tar_path)

        t = tarfile.open(tar_path, 'r|')
        t.extractall(path=target_dir)

        logging.debug("extracted {} to {}".format(uuid, target_dir))

        # update the root analysis to indicate its new location
        root = RootAnalysis(storage_dir=target_dir)
        root.load()

        root.location = saq.SAQ_NODE
        root.company_id = saq.COMPANY_ID
        root.company_name = saq.COMPANY_NAME

        root.save()

        if sync:
            root.schedule()

        # looks like it worked
        return json_result({'result': True})

    except Exception as e:
        logging.error("unable to upload {}: {}".format(uuid, e))
        report_exception()
        abort(Response("unable to upload {}: {}".format(uuid, e)))

    finally:
        try:
            os.remove(tar_path)
        except Exception as e:
            logging.error("unable to remove {}: {}".format(tar_path,e ))