def post(self):
    """Brand and model classification endpoint.

    The image can be supplied either as an internet URL in the ``url``
    form field or as one or more locally uploaded files in the ``image``
    field. Each successfully decoded image is classified with
    ``predict_class`` and the predictions are returned as a list.

    Aborts with 403 when the request carries neither a ``url`` nor any
    ``image`` files.
    """
    images = request.files.getlist('image')
    url = request.form.get('url', None)
    # Reject only when NO input was provided at all. (Previously the
    # abort fired whenever `images` was empty, discarding a successful
    # URL-based prediction.)
    if not url and not images:
        abort(403)
    res = list()
    if url:
        try:
            resp = urlopen(url)
            img = np.asarray(bytearray(resp.read()), dtype="uint8")
            img = cv2.imdecode(img, cv2.IMREAD_COLOR)
            # cv2 decodes to BGR; the classifier presumably expects RGB.
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            res.append(predict_class(img))
        except Exception as e:
            # Best-effort: a bad URL/undecodable image is logged and skipped
            # so uploaded files (if any) are still processed.
            print(url)
            print(e)
    for upload in images:
        nparr = np.frombuffer(upload.read(), np.uint8)
        img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        res.append(predict_class(img))
    return res
def test_class(img_clio4, img_clio_peugeot):
    """Check that predict_class finds the expected models in both fixtures.

    For the two-vehicle image every expected model must appear somewhere
    in the top predictions; for the single Clio image a CLIO label must
    appear.
    """
    print('Testing image', img_clio_peugeot.shape)
    res = predict_class(img_clio_peugeot)
    for modele in ['CLIO', '208', 'A1', 'MINI']:
        # Message now names the model actually being checked (the old text
        # always claimed a missing "clio", even for 208/A1/MINI).
        assert any(modele in vehicule['brand_model_classif']['label']
                   for vehicule in res), \
            'There is no {} in first predictions'.format(modele)
    print('Testing image', img_clio4.shape)
    res = predict_class(img_clio4)
    assert isinstance(res, list)
    assert any('CLIO' in vehicule['brand_model_classif']['label']
               for vehicule in res), 'There is no clio in first predictions'
def lambda_handler_classification(event, context):
    """AWS Lambda entry point for image classification.

    Expects a base64-encoded image in ``event['image']``; decodes it with
    OpenCV, runs ``predict_class`` on it, and returns an API-Gateway-style
    response with the JSON-serialized predictions.

    :param event: Lambda event dict; only the ``image`` key is read.
    :param context: Lambda context (unused).
    :return: dict with ``statusCode`` 200 and a JSON ``body``.
    """
    # Debug output to CloudWatch: active backend configuration and /tmp state.
    print("ENV", getenv('BACKEND'))
    print("ENV", getenv('DETECTION_THRESHOLD'))
    print("LISTDIR", listdir('/tmp'))
    res = list()
    body_str = event.get('image', None)
    if body_str:
        print(type(body_str))
        print(body_str[:100])
        # read encoded image
        imageString = base64.b64decode(body_str)
        # convert binary data to numpy array
        nparr = np.frombuffer(imageString, np.uint8)
        # let opencv decode image to correct format
        img = cv2.imdecode(nparr, cv2.IMREAD_ANYCOLOR)
        res.append(predict_class(img))
    # NOTE(review): no BGR->RGB conversion here, unlike the HTTP handler —
    # confirm whether predict_class expects RGB input.
    return {'statusCode': 200, 'body': json.dumps(res)}
def test_class(img_clio4):
    """Check that predict_class returns a list containing a CLIO label."""
    print('Testing image', img_clio4.shape)
    res = predict_class(img_clio4)
    # isinstance is the idiomatic type check (was: type(res) == list).
    assert isinstance(res, list)
    assert any('CLIO' in vehicule['label'] for vehicule in res), \
        'There is no clio in first predictions'
def test_class_prio(img_clio4):
    """Test the priority classifier, only when CLASSIFICATION_MODEL_PRIO is set.

    Skips (with a loud print) when the environment variable is absent so
    the suite still passes without the priority model configured.
    """
    if 'CLASSIFICATION_MODEL_PRIO' in os.environ:
        print('Testing image', img_clio4.shape)
        res = predict_class(img_clio4)
        assert isinstance(res, list)
        assert any('CLIO' in vehicule['brand_model_classif']['label']
                   for vehicule in res), \
            'There is no clio in first predictions'
        # Message fixed: this assertion checks the priority label, not CLIO.
        assert any('AUTRES' in vehicule['prio_classif']['label']
                   for vehicule in res), \
            'There is no AUTRES in first prio predictions'
    else:
        print('!!!! Test not executed, add CLASSIFICATION_MODEL_PRIO path !!!!!')
def long_task(self, video_name, rotation90, prob_detection, prob_classification, selected_fps):
    """Celery task: scan a video for vehicles and classify their brand/model.

    Reads the video previously saved at /tmp/video, samples frames at
    roughly ``selected_fps``, runs ``predict_class`` on each sampled frame,
    and keeps the last annotated frame per detected model in ``res``.
    Progress is reported to the Celery backend via ``self.update_state``.

    :param video_name: original video name (only logged here).
    :param rotation90: extra user-requested rotation in degrees (multiple of 90).
    :param prob_detection: detection confidence threshold, in percent.
    :param prob_classification: classification probability threshold, in percent.
    :param selected_fps: target sampling rate in frames per second.
    :return: dict with totals, per-model annotated frames, and the model list.
    """
    logger.debug(video_name)
    # Keyed by predicted model label; holds the latest matching frame info.
    res = dict()
    cap = cv2.VideoCapture("/tmp/video", )
    # Container metadata is read with MediaInfo because OpenCV ignores the
    # rotation tag stored in the video track.
    media_info = MediaInfo.parse('/tmp/video')
    myjson = json.loads(media_info.to_json())
    rotation = myjson['tracks'][1]['rotation']
    # Total rotation = container rotation + user-requested rotation,
    # expressed as a count of 90-degree turns.
    total_rotation = int(float(rotation) / 90) + int(rotation90 / 90)
    logger.debug('Rotation total {}'.format(rotation))
    # Retry until OpenCV has parsed the header and the capture is usable.
    while not cap.isOpened():
        cap = cv2.VideoCapture("/tmp/video", )
        cv2.waitKey(1000)
        logger.debug("Wait for the header")
    pos_frame = cap.get(cv2.cv2.CAP_PROP_POS_FRAMES)
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    # Process one frame out of every `skip_images`.
    # NOTE(review): if fps < selected_fps this is 0 and the modulo below
    # raises ZeroDivisionError — confirm inputs guarantee fps >= selected_fps.
    skip_images = int(fps / selected_fps)
    logger.debug(
        "Real fps {}, selected fps: {}, taking 1 image between {}".format(
            fps, selected_fps, skip_images))
    while True:
        flag, frame = cap.read()
        if flag:
            # The frame is ready and already captured
            pos_frame = int(cap.get(cv2.cv2.CAP_PROP_POS_FRAMES))
            # Only sample every `skip_images`-th frame.
            if pos_frame % skip_images == 0:
                h, w, _ = frame.shape
                # frame = frame[0:h,int(2*w/3):w]
                frame = frame[0:h, 0:w]
                frame = rotate_frame90(frame, total_rotation)
                # Push partial results so the client can poll progress.
                self.update_state(state='PROGRESS',
                                  meta={
                                      'current': pos_frame,
                                      'total': total_frames,
                                      'partial_result': [{
                                          'frame': res[key]['frame'],
                                          'seconds': res[key]['seconds'],
                                          'model': key,
                                          'img': res[key]['img']
                                      } for key in res]
                                  })
                output = predict_class(frame)
                if len(output) > 0:
                    # Each box presumably carries detection coords, a
                    # confidence, and ranked class predictions — verify
                    # against predict_class's contract.
                    for box in output:
                        logger.debug('Frame {}'.format(pos_frame))
                        logger.debug(box)
                        # Keep only boxes above both user thresholds
                        # (thresholds arrive as percentages).
                        if float(box['confidence']) > (
                                prob_detection / 100) and float(
                                    box['prob'][0]) > (prob_classification / 100):
                            logger.debug(box['pred'][0])
                            # Print detected boxes
                            cv2.rectangle(frame, (box['x1'], box['y1']),
                                          (box['x2'], box['y2']),
                                          (255, 0, 0), 6)
                            cv2.putText(frame, box['label'],
                                        (box['x1'], box['y1'] - 5),
                                        cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                        (0, 255, 0), 2)
                            # Convert captured image to JPG
                            retval, buffer = cv2.imencode('.jpg', frame)
                            # Convert to base64 encoding and show start of data
                            jpg_as_text = base64.b64encode(buffer)
                            base64_string = jpg_as_text.decode('utf-8')
                            modele = box['pred'][0]
                            # Later sightings of the same model overwrite
                            # earlier ones — only the last frame is kept.
                            res[modele] = {
                                'frame': pos_frame,
                                'seconds': pos_frame / fps,
                                'model': box['pred'][0],
                                'img': base64_string
                            }
        else:
            # Read failed: assume end of stream and stop.
            break
    return {
        'current': total_frames,
        'total': total_frames,
        'status': 'Task completed!',
        'partial_result': [{
            'frame': res[key]['frame'],
            'seconds': res[key]['seconds'],
            'model': key,
            'img': res[key]['img']
        } for key in res],
        'result': list(res.keys())
    }