Example #1
def upload_to_b2_bucket(filepath, bucket_name, prefix):
    """
    Upload backup to bucket.

    Args:
        filepath (str):  The path of the file to upload.
        bucket_name (str):  The name of the bucket to upload to.
    """
    import os
    from b2blaze import B2

    # b2blaze reads the credentials from these environment variables
    assert 'B2_KEY_ID' in os.environ
    assert 'B2_APPLICATION_KEY' in os.environ

    b2 = B2()

    bucket_names = [b.bucket_name for b in b2.buckets.all()]
    assert bucket_name in bucket_names, \
        'Bucket {} not in {}'.format(bucket_name, bucket_names)
    bucket = b2.buckets.get(bucket_name)

    prefix = prefix.strip()
    if prefix and not prefix.endswith('/'):
        prefix += '/'

    with open(filepath, 'rb') as f:
        bucket.files.upload(contents=f,
                            file_name=prefix + os.path.basename(filepath))
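
A minimal usage sketch, assuming the two environment variables above are set; the path, bucket, and prefix below are placeholders:

# Hypothetical call with placeholder names.
upload_to_b2_bucket('/var/backups/backup.tar.gz', 'my-backups', 'daily')
# the object is stored as daily/backup.tar.gz in the my-backups bucket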
Example #2
def upload_to_b2(tar_file, db_name):
    b2 = B2(b2_key_id, b2_app_key)
    bucket = b2.buckets.get(b2_bucket)
    with open(tar_file, "rb") as f:
        new_file = bucket.files.upload(
            contents=f, file_name=db_name + "/" + tar_file, progress_listener=print
        )
    return new_file
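
A hedged usage sketch; b2_key_id, b2_app_key, and b2_bucket are module-level globals in the original project, and the argument values below are placeholders:

# Hypothetical call with placeholder names.
new_file = upload_to_b2("db-dump.tar", "customers")
print(new_file.url)  # stored as customers/db-dump.tar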
Example #3
def alert_face_detection(faces, image):
    for (x, y, w, h) in faces:
        cv2.rectangle(image, (x, y), (x + w, y + h), (255, 255, 0), 2)

    cv2.imwrite('result.jpg', image)
    b2 = B2(key_id=b2_id, application_key=b2_app_key)
    bucket = b2.buckets.get('monitors')

    with open('result.jpg', 'rb') as image_file:
        new_file = bucket.files.upload(contents=image_file,
                                       file_name='capture/result.jpg')
    print(new_file.url)

    client = Client(account_sid, auth_token)
    message = client.messages.create(to='+14109806647',
                                     from_='+15014564510',
                                     media_url=[new_file.url],
                                     body="I detected a face in the basement")
Example #4
    def object_detect(task_data):
      node_name      = task_data['name']
      node_video_url = task_data['url']
      node_type      = task_data['type']
      check_all_frames = False

      # fetch the video file from the node's IP, then use OpenCV for object detection
      if node_type == 'jpg':
        downloaded_video_path = "/tmp/video.jpg"
        check_all_frames = True
      else:
        downloaded_video_path = "/tmp/video.mp4"

      with urllib.request.urlopen(node_video_url) as response:
        download_file_handle = open(downloaded_video_path, "wb")
        download_file_handle.write(response.read())
        download_file_handle.close()

      # hack: convert the jpg to a single-frame video so the rest of the code stays the same
      if node_type == 'jpg':
        os.system("ffmpeg -y -i /tmp/video.jpg /tmp/video.mp4")
        downloaded_video_path = "/tmp/video.mp4"

      fourcc = cv2.VideoWriter_fourcc(*'mp4v')
      cap = cv2.VideoCapture(downloaded_video_path)
      net = cv2.dnn.readNetFromCaffe(ObjDetect_prototxt, ObjDetect_weights)
      out = None

      detected_human = 0
      detected_car = 0

      # This is a time-consuming, high-CPU workload, so we only scan up to
      # max_check_frames frames and spread the checks evenly, so that we still
      # analyze a good portion of the 10-12 second clip that was sent to us.
      frame_count = 0
      check_interval = 20 # check every check_interval-th frame, i.e. frame_count % check_interval == 0
      checked_frames = 0
      max_check_frames = 10

      try:
        while True:
          # Capture frame-by-frame
          ret, frame = cap.read()
          if not ret:
            break


          frame_count += 1
          if check_all_frames or (frame_count % check_interval == 0 and checked_frames < max_check_frames):
            print("check frames: %d" % frame_count)
            checked_frames += 1

            frame_resized = cv2.resize(frame, (300, 300)) # resize frame for prediction

            # MobileNet requires a fixed input size, so the frame is resized to
            # 300x300 pixels. The scale factor 0.007843 (1/127.5) and the mean
            # subtraction (127.5, 127.5, 127.5) normalize the input; after this
            # call the "blob" has shape (1, 3, 300, 300).
            blob = cv2.dnn.blobFromImage(frame_resized, 0.007843, (300, 300), (127.5, 127.5, 127.5), False)
            # feed the blob to the network and run a forward pass
            net.setInput(blob)
            detections = net.forward()

            # size of the resized frame (300x300)
            cols = frame_resized.shape[1]
            rows = frame_resized.shape[0]

            # The detections array uses fixed indices for the class id,
            # confidence, and bounding-box location of each detection.
            for i in range(detections.shape[2]):
              confidence = detections[0, 0, i, 2] #Confidence of prediction 
              if confidence > ObjDetect_thr: # Filter prediction 
                class_id = int(detections[0, 0, i, 1]) # Class label

                # Object location 
                xLeftBottom = int(detections[0, 0, i, 3] * cols) 
                yLeftBottom = int(detections[0, 0, i, 4] * rows)
                xRightTop   = int(detections[0, 0, i, 5] * cols)
                yRightTop   = int(detections[0, 0, i, 6] * rows)
                
                # Factor for scale to original size of frame
                heightFactor = frame.shape[0]/300.0  
                widthFactor = frame.shape[1]/300.0 
                # Scale object detection to frame
                xLeftBottom = int(widthFactor * xLeftBottom) 
                yLeftBottom = int(heightFactor * yLeftBottom)
                xRightTop   = int(widthFactor * xRightTop)
                yRightTop   = int(heightFactor * yRightTop)
                # Draw location of object  
                cv2.rectangle(frame, (xLeftBottom, yLeftBottom), (xRightTop, yRightTop),
                              (0, 255, 0))

                # Draw the label and confidence of the prediction on the frame
                if class_id in ObjDetect_classNames:
                  label = ObjDetect_classNames[class_id] + ": " + str(confidence)
                  labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)

                  yLeftBottom = max(yLeftBottom, labelSize[1])
                  cv2.rectangle(frame, (xLeftBottom, yLeftBottom - labelSize[1]),
                                       (xLeftBottom + labelSize[0], yLeftBottom + baseLine),
                                       (255, 255, 255), cv2.FILLED)
                  cv2.putText(frame, label, (xLeftBottom, yLeftBottom),
                              cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))

                  print(label) #print class and confidence
                  # person: 0.9904131
                  if ObjDetect_classNames[class_id] == 'person':
                    detected_human = 1

                  if ObjDetect_classNames[class_id] == 'car':
                    detected_car = 1

            #cv2.namedWindow("frame", cv2.WINDOW_NORMAL)
            #cv2.imshow("frame", frame)
          #else:
          #  print("skip analysis of frame: %d" % frame_count)

          if out is None:
            fshape = frame.shape
            print("init frame output")
            fheight = fshape[0]
            fwidth = fshape[1]
            print(fwidth, fheight)

            out = cv2.VideoWriter('/tmp/video-boxed.mp4', fourcc, 25.0, (fwidth,fheight))

          out.write(frame)

          #if cv2.waitKey(1) >= 0:  # Break with ESC 
          #    break
      except KeyboardInterrupt:
        print("Quit")
      finally:
        print("cleaning up input and output")
        if out is not None:
          out.release()
        cap.release()

      # if we detected a human or car alert
      # archive all recordings (we need that ssd drive)
      print("finished analysis : %d %d" % (detected_human, detected_car))

      if detected_human == 1:
        print("Alert a human entered")
        # TODO: do face detection on the human frames and see if it's a human we know?
        b2 = B2(key_id=b2_id, application_key=b2_app_key)
        bucket = b2.buckets.get('monitors')
        os.system("/usr/bin/ffmpeg -y -i /tmp/video-boxed.mp4 -vf scale=320:240 /tmp/video-sized.mp4")

        with open('/tmp/video-sized.mp4', 'rb') as video_file:
          new_file = bucket.files.upload(contents=video_file, file_name='capture/person.mp4')
        print(new_file.url)

        client = Client(account_sid, auth_token)
        message = client.messages.create(to='+14109806647',
                                         from_='+15014564510',
                                         media_url=[new_file.url],
                                         body="I detected a human: %s" % new_file.url)
      if detected_car == 1:
        print("Alert a car entered")
        # TODO: is it one of our cars?
        b2 = B2(key_id=b2_id, application_key=b2_app_key)
        bucket = b2.buckets.get('monitors')
        os.system("/usr/bin/ffmpeg -y -i /tmp/video-boxed.mp4 -vf scale=320:240 /tmp/video-sized.mp4")
        # ffmpeg -i output.mp4 -vf scale=320:240 output_320.mp4

        with open('/tmp/video-sized.mp4', 'rb') as video_file:
          new_file = bucket.files.upload(contents=video_file, file_name='capture/car.mp4')
        print(new_file.url)

        client = Client(account_sid, auth_token)
        message = client.messages.create(to='+14109806647',
                                         from_='+15014564510',
                                         media_url=[new_file.url],
                                         body="I detected a car: %s" % new_file.url)

      print("job done") 
Example #5
File: sms.py  Project: taf2/pi-alert
import os

import picamera
from twilio.rest import Client
from b2blaze import B2

b2_id = os.environ.get('b2_id')
b2_app_key = os.environ.get('b2_app_key')

account_sid = os.environ.get('account_sid')
auth_token = os.environ.get('auth_token')

client = Client(account_sid, auth_token)

with picamera.PiCamera() as camera:
    camera.capture_sequence([
        'image1.jpg',
    ])

b2 = B2(key_id=b2_id, application_key=b2_app_key)
bucket = b2.buckets.get('monitors')

with open('image1.jpg', 'rb') as image_file:
    new_file = bucket.files.upload(contents=image_file,
                                   file_name='capture/result.jpg')
print(new_file.url)

message = client.messages.create(to='+14109806647',
                                 from_='+15014564510',
                                 media_url=[new_file.url],
                                 body="We can do this")
Example #6
def get_connected_bucket(bucket_name=None):
    return B2().buckets.get(bucket_name or current_app.config["B2_BUCKET"])
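
A minimal sketch of calling this Flask helper inside an application context; the config value and bucket names below are placeholders, and B2() picks up credentials from the B2_KEY_ID/B2_APPLICATION_KEY environment variables as in Example #1:

# Hypothetical Flask setup with placeholder bucket names.
from flask import Flask

app = Flask(__name__)
app.config["B2_BUCKET"] = "default-bucket"

with app.app_context():
    bucket = get_connected_bucket()            # falls back to app.config["B2_BUCKET"]
    reports = get_connected_bucket("reports")  # explicit name takes precedence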
Example #7
def upload_html_b2():
    b2 = B2()
    bucket = b2.buckets.get('coviddata')

    with open(html_filename, 'rb') as text_file:
        bucket.files.upload(contents=text_file, file_name=html_out_filename)
Example #8
import os
import asyncio
import logging
from concurrent.futures import ThreadPoolExecutor

from aiohttp import web
from b2blaze import B2

# Logging configuration
if "DEBUG_FLOOD" in os.environ:
    logging.basicConfig(level=logging.DEBUG)
else:
    logging.basicConfig(level=logging.INFO)

logging.getLogger("asyncio").setLevel(logging.DEBUG)
log = logging.getLogger(__name__)

# TODO: Move these constants somewhere.
HEIGHT_PADDING = 25
CDN_BASE = "https://" + os.environ["CDN_URL"] + "/file/" + os.environ[
    "B2_BUCKET"] + "/mailrender_api/"

# B2 utility
b2 = B2()
bucket = b2.buckets.get(os.environ["B2_BUCKET"])

# Service
routes = web.RouteTableDef()
executor = ThreadPoolExecutor(max_workers=4)


@routes.get('/')
async def rootpage(request):
    return web.Response(text="mail render api")


async def upload_file(data: bytes, file_name: str):
    loop = asyncio.get_running_loop()
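
The snippet ends before the body of upload_file completes. One plausible continuation, assuming the blocking b2blaze upload is handed off to the thread pool above (a sketch, not the project's actual code):

# Hypothetical continuation; not taken from the source project.
import io

async def upload_file(data: bytes, file_name: str):
    loop = asyncio.get_running_loop()
    # b2blaze's upload call blocks, so run it on the executor's worker threads
    return await loop.run_in_executor(
        executor,
        lambda: bucket.files.upload(contents=io.BytesIO(data), file_name=file_name))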
Example #9
def upload_html_b2(html_out_filename):
    b2 = B2()
    bucket = b2.buckets.get(bucketname)

    with open(html_out_filename, 'rb') as html_file:
        bucket.files.upload(contents=html_file, file_name=html_out_filename)
Example #10
def upload_plot(plot_filename):
    b2 = B2()
    bucket = b2.buckets.get(bucketname)
    with open(plot_filename, 'rb') as plot_file:
        bucket.files.upload(contents=plot_file, file_name=plot_filename)
Example #11
File: core.py  Project: vaz3r/pbmovies
            return 0

    data = json.loads(json_data)
    sorted_obj = dict(data) 
    sorted_obj['data'] = sorted(data['data'], key=sort_by_rating, reverse=True)

    final_data = json.dumps(sorted_obj)

    with open("best\\best-" + genre + ".json", 'w') as f:
        f.write(final_data)

    print(genre + " -> DONE.")

print("Uploading to server...")

b2 = B2(key_id="b41a85681294", application_key="001705ef3076ce3e3899d3a384fdef978e113e0d33")
bucket = b2.buckets.get('movies-db')

for genre in genres:
    # POPULAR DIR
    with open("popular\\" + genre + ".json", 'rb') as file_handle:
        bucket.files.upload(contents=file_handle, file_name=genre + ".json")
    # BEST DIR
    with open("best\\best-" + genre + ".json", 'rb') as file_handle:
        bucket.files.upload(contents=file_handle, file_name="best-" + genre + ".json")

end = time.time()