def calc_optical_flow(im1, im2, image_height, image_width): # calculate dense optical flow # settings from tutorial # https://docs.opencv.org/3.3.1/d7/d8b/tutorial_py_lucas_kanade.html cv2_version = int(cv2.__version__.split('.')[0]) frame1 = image_manipulation.resize_image(cv2.imread(im1), image_height, image_width) f1_gray = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY) frame2 = image_manipulation.resize_image(cv2.imread(im2), image_height, image_width) f2_gray = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY) # if cv2_version > 2: # # NATES CV2 # flow = cv2.calcOpticalFlowFarneback(f1_gray,f2_gray, None, 0.5, 3, 15, 3, 5, 1.2, 0) # else: # # MATTS CV2 # flow = cv2.calcOpticalFlowFarneback(f1_gray,f2_gray, 0.5, 3, 30, 3, 5, 1.2, 0) oflow_tvl1 = cv2.DualTVL1OpticalFlow_create() flow = oflow_tvl1.calc(f1_gray, f2_gray, None) h_oflow = flow[..., 0] v_oflow = flow[..., 1] print "\tBefore adjustment..." print "\tmax: ", h_oflow.max() print "\tmin: ", h_oflow.min() print "\tmean: ", h_oflow.mean() # h_oflow[h_oflow < -127] = -127 # h_oflow[h_oflow > 127] = 127 # v_oflow[v_oflow < -127] = -127 # v_oflow[v_oflow > 127] = 127 # h_oflow = np.rint(h_oflow) # v_oflow = np.rint(v_oflow) #h_oflow += 127 #v_oflow += 127 #h_oflow[h_oflow > 255] = 255 #h_oflow[h_oflow < 0] = 0 #v_oflow[v_oflow > 255] = 255 #v_oflow[v_oflow < 0] = 0 #h_oflow = np.rint(h_oflow) #_oflow = np.rint(v_oflow) print "\tAfter adjustment..." print "\tmax: ", h_oflow.max() print "\tmin: ", h_oflow.min() print "\tmean: ", h_oflow.mean() return h_oflow, v_oflow
def preprocess_and_create_lmdb(list_of_seqs, lmdb_name):
    """Preprocess optical-flow image sequences and write them to an LMDB.

    Each element of list_of_seqs is a (sequence, label) pair, where
    `sequence` is a list of image file paths. Every image is resized to
    100x100, grayscale-handled, transposed to CHW, and the whole
    sequence is stacked along the channel axis into one
    (seq_size * channels, 100, 100) array, which is stored as a
    Caffe2 TensorProtos record (float image tensor + int32 label).

    Args:
        list_of_seqs: iterable of (list_of_image_paths, label) pairs.
        lmdb_name: path of the LMDB database to create/open.
    """
    print(">>> Write " + str(lmdb_name) + " database...")
    LMDB_MAP_SIZE = 1 << 40   # MODIFY: just a very large number
    print("LMDB_MAP_SIZE", LMDB_MAP_SIZE)
    env = lmdb.open(lmdb_name, map_size=LMDB_MAP_SIZE)
    count = 0

    for s in list_of_seqs:
        seq = s[0]
        label = int(s[1])

        # Preprocess sequence into one (seq_size*channels, 100, 100) array.
        # Collect channels in a list and concatenate once instead of
        # repeatedly calling np.append (which copies the whole array
        # each iteration, i.e. O(n^2)).
        channels = []
        for of_file in seq:
            of_img = cv2.imread(of_file).astype(np.float32)
            of_img = image_manipulation.resize_image(of_img, 100, 100)
            of_img = image_manipulation.handle_greyscale(of_img)
            channels.append(np.transpose(of_img, (2, 0, 1)))
        if not channels:
            # Guard: the original code silently reused the previous
            # iteration's array for an empty sequence.
            continue
        seq_for_lmdb = np.concatenate(channels, axis=0)

        # Load the stacked sequence into the LMDB as TensorProtos.
        with env.begin(write=True) as txn:
            tensor_protos = caffe2_pb2.TensorProtos()
            img_tensor = tensor_protos.protos.add()
            img_tensor.dims.extend(seq_for_lmdb.shape)
            img_tensor.data_type = 1   # FLOAT
            img_tensor.float_data.extend(seq_for_lmdb.reshape(-1))
            label_tensor = tensor_protos.protos.add()
            label_tensor.data_type = 2   # INT32
            label_tensor.int32_data.append(label)
            txn.put(
                '{}'.format(count).encode('ascii'),
                tensor_protos.SerializeToString()
            )
            if count % 100 == 0:
                print("Inserted {} rows".format(count))
            count += 1

    print("Inserted {} rows".format(count))
    # FIX: was `training_output`, an undefined name in this function
    # (NameError at runtime); the database path is `lmdb_name`.
    print("\nLMDB saved at " + lmdb_name)
def extract_upload(self, ch, method, properties, body):
    """Pika consumer callback: extract faces from an image, upload crops to S3,
    and publish one message per crop to the image creation queue.

    JSON Payload should look like
    {'user_id':1234, 'picture':'http://foo.bar', 'id':'facebook_image_id'}

    The incoming message is acknowledged only after all uploads and
    publishes have completed.
    """
    self.logger.info('Processing Record')
    body = json.loads(body)
    image_url, user, photo_id = body['source'], body['user_id'], body['id']
    # download, resize and grayscale the source image before face detection
    normalized = image_manipulation.grayscale(
        image_manipulation.resize_image(get_image(image_url)))
    # find roi's (face bounding boxes)
    roi = image_manipulation.find_faces(normalized)
    self.logger.debug('found %d regions of interest', len(roi))
    # crop images to get faces; each crop is serialized to an in-memory JPEG
    cropped = []
    for face in roi:
        image_buffer = cStringIO.StringIO()
        cropped_image = image_manipulation.crop_face(normalized, face)
        cropped_image.save(image_buffer, format='JPEG')
        cropped.append(image_buffer)
    # find the bucket to upload to
    user_bucket = s3_tools.get_or_create_bucket(self.s3_connection, 'robj.findme')
    # the created keys
    created_keys = []
    # upload each cropped image to s3 under user_images/<user>_<photo_id>_<index>
    for index, extracted in enumerate(cropped):
        created_keys.append(s3_tools.upload_string_to_bucket(
            user_bucket,
            'user_images/{}_{}_{}'.format(user, photo_id, index),
            extracted.getvalue()))
    for key in created_keys:
        # post the body back to the image_creation_queue
        self.rmq_channel.basic_publish(
            exchange='',
            routing_key=self.creation_queue_name,
            body=json.JSONEncoder().encode({
                "key": key.name,
                "user_id": user,
                "original_image_url": image_url
            }),
            properties=pika.BasicProperties(
                delivery_mode=2,   # make message persistent
            )
        )
        self.logger.info('Published image to image creation queue')
    # we're done, so acknowledge the message
    self.rmq_channel.basic_ack(delivery_tag=method.delivery_tag)
def test_image_resize(self):
    """Verify that image_manipulation.resize_image returns an image
    whose size matches the requested dimensions."""
    target_size = (20, 20)
    resized = image_manipulation.resize_image(self.image, target_size)
    self.assertEqual(resized.size, target_size)
# Paths to a pair of consecutive Jester frames used for the flow experiment.
im2 = os.path.join(os.path.expanduser('~'), 'DukeML', 'datasets', 'jester',
                   '20bn-jester-v1', '13377', '00011.jpg')
im1_p = os.path.join(os.path.expanduser('~'), 'image_manipulation', 'images',
                     '13377-00010.jpg')
im2_p = os.path.join(os.path.expanduser('~'), 'image_manipulation', 'images',
                     '13377-00011.jpg')

# Load both frames and reverse the channel axis (OpenCV reads BGR).
i1 = cv2.imread(im1_p)[:, :, (2, 1, 0)]
i2 = cv2.imread(im2_p)[:, :, (2, 1, 0)]

# Bring both frames to the 100x100 working resolution.
i1 = image_manipulation.resize_image(i1, 100, 100)
i2 = image_manipulation.resize_image(i2, 100, 100)