def getlist():
    #response contains the list of all buckets for the selected project
    response = storage.list_buckets(settings.CLOUD_PROJECT_ID)
    #objects contains the list of all images in each bucket
    objects = []
    #This statement gets the buckets whose name starts with datacore
    for lstbct in response['items']:
        if lstbct['name'].startswith('datacore'):
            #blobs contains all the images from each bucket
            blobs = []
            #respobject contains all the images from each bucket
            respobject = storage.list_objects(lstbct['name'])
            #This statement gets the details of all the images in each bucket
            for index, bct in enumerate(respobject):
                #if 'segment/' not in str(bct['name']):
                blobs.append({
                    'index': index,
                    'name': str(bct['name']),
                    'public_url': 'https://storage.googleapis.com/' + lstbct['name'] + '/' + str(bct['name']),
                    'timecreated': datetime.strptime(str(bct['timeCreated']), '%Y-%m-%dT%H:%M:%S.%fZ'),
                    'type': str(bct['contentType'])
                })
            objects.append({
                'bucketName': lstbct['name'],
                'blobs': blobs,
                'totalBlobs': len(blobs)
            })
    return objects

def detectionHome(request):
    if request.method == 'GET':
        '''
        This block returns the response for the get request
        '''
        try:
            #loggedInUser gets the email of the logged in user
            loggedInUser = request.session['uEmail']
        except:
            #This statement redirects the user if he/she has not logged in
            return redirect('/welcome')
        #response contains the list of buckets corresponding to the cloud project ID
        response = storage.list_buckets(settings.CLOUD_PROJECT_ID)
        #objects contains only the list of buckets whose name starts with datacore
        objects = {}
        #This statement extracts the buckets whose name starts with datacore and their blobs
        for lstbct in response['items']:
            if lstbct['name'].startswith('datacore'):
                #the lists below hold the objects (images) in each bucket, grouped by folder
                total_blobs = []
                original_blobs = []
                segment_nonad_blobs = []
                segment_ad_blobs = []
                respobject = storage.list_objects(lstbct['name'])
                #This statement sorts the images of each bucket into the three groups
                for index, bct in enumerate(respobject):
                    if 'segment/ad' in str(bct['name']):
                        segment_ad_blobs.append({
                            'bucketType': 'segmentAd',
                            'index': index,
                            'name': str(bct['name']),
                            'public_url': 'https://storage.googleapis.com/' + lstbct['name'] + '/' + str(bct['name']),
                            'timecreated': datetime.strptime(str(bct['timeCreated']), '%Y-%m-%dT%H:%M:%S.%fZ'),
                            'type': str(bct['contentType'])
                        })
                    elif 'segment/nonad/' in str(bct['name']):
                        segment_nonad_blobs.append({
                            'bucketType': 'segmentNonAd',
                            'index': index,
                            'name': str(bct['name']),
                            'public_url': 'https://storage.googleapis.com/' + lstbct['name'] + '/' + str(bct['name']),
                            'timecreated': datetime.strptime(str(bct['timeCreated']), '%Y-%m-%dT%H:%M:%S.%fZ'),
                            'type': str(bct['contentType'])
                        })
                    else:
                        original_blobs.append({
                            'bucketType': 'original',
                            'index': index,
                            'name': str(bct['name']),
                            'public_url': 'https://storage.googleapis.com/' + lstbct['name'] + '/' + str(bct['name']),
                            'timecreated': datetime.strptime(str(bct['timeCreated']), '%Y-%m-%dT%H:%M:%S.%fZ'),
                            'type': str(bct['contentType'])
                        })
                total_blobs.append(original_blobs)
                total_blobs.append(segment_nonad_blobs)
                total_blobs.append(segment_ad_blobs)
                objects.update({lstbct['name']: total_blobs})
        #context contains the data to be passed to the HTML page for the get request
        context = {
            'loggedIn': True,
            'objects': objects,
        }
        #This statement returns the response as an HTML page along with the data to be displayed in it
        return render(request, 'detectionHome.html', context)
    else:
        '''
        This block returns the response for the post request
        '''
        if request.POST['submit'] == 'Image View':
            selectedImages = request.POST.getlist('miasimages')
            miasImagesStorage = storage.Client()
            miasBucket = miasImagesStorage.get_bucket(settings.CLOUD_STORAGE_BUCKET)
            images = miasBucket.list_blobs()
            filteredImages = []
            for image in images:
                if 'segment/' in image.name:
                    pass
                else:
                    filteredImages.append({
                        'url': image.public_url,
                        'name': image.name
                    })
            result = []
            for eachImage in selectedImages:
                fname = str(os.path.basename(eachImage))
                result.append({'imageUrl': eachImage, 'name': fname})
            #context contains the data to be passed to the HTML page for the post request
            context = {
                'loggedIn': True,
                'operation': 'Image View',
                'miasimages': filteredImages,
                'result': result,
            }
            #This statement returns the response as an HTML page along with the data to be displayed in it
            return render(request, 'detectionHome.html', context)
        elif request.POST['submit'] == 'Text Detection':
            '''
            This block performs the text detection for the selected images
            '''
            #features represents the operation that needs to be performed on the images
            features = '5:10'
            #selectedImages contains the list of images selected by the user
            selectedImages = request.POST.getlist('miasimages')
            #result contains the detected text in each image
            result = {}
            #This statement performs the text detection operation on the images
            for image in selectedImages:
                #response contains the image data from the google cloud storage bucket
                response = requests.get(image)
                #selectedImage contains the data of the image in base64 format
                selectedImage = base64.b64encode(BytesIO(response.content).getvalue()).decode()
                #__text contains the text detected in the image, if any
                __text = __generate_json(selectedImage, features)
                if 'textAnnotations' in __text:
                    for index, ttext in enumerate(__text['textAnnotations'], start=0):
                        #index 0 holds the full detected text block
                        if index == 0:
                            result[image] = str(ttext['description'])
                else:
                    result[image] = 'No text in the image'
            #context contains the data to be passed to the HTML page for the post request
            context = {
                'loggedIn': True,
                'operation': 'Text Detection',
                'resultKeys': result.keys(),
                'result': result,
            }
            #This statement returns the response as an HTML page along with the data to be displayed in it
            return render(request, 'detectionresult.html', context)
        elif request.POST['submit'] == 'Logo Detection':
            '''
            This block performs the logo detection for the selected images
            '''
            #features represents the operation that needs to be performed on the images
            features = '3:10'
            #selectedImages contains the list of images selected by the user
            selectedImages = request.POST.getlist('miasimages')
            #result contains the detected logo in each image
            result = []
            #This statement performs the logo detection operation on the images
            for image in selectedImages:
                #response contains the image data from the google cloud storage bucket
                response = requests.get(image)
                #selectedImage contains the data of the image in base64 format
                selectedImage = base64.b64encode(BytesIO(response.content).getvalue()).decode()
                #__logo contains the logo detected in the image, if any
                __logo = __generate_json(selectedImage, features)
                if 'logoAnnotations' in __logo:
                    for index, tlogo in enumerate(__logo['logoAnnotations'], start=0):
                        result.append({
                            'imageUrl': image,
                            'logoDescription': str(tlogo['description']),
                            'percent': str(round(tlogo['score'] * 100))
                        })
                else:
                    result.append({
                        'imageUrl': image,
                        'logoDescription': 'No Logo in the image',
                        'percent': '0'
                    })
            #context contains the data to be passed to the HTML page for the post request
            context = {
                'loggedIn': True,
                'operation': 'Logo Detection',
                'result': result,
            }
            #This statement returns the response as an HTML page along with the data to be displayed in it
            return render(request, 'detectionresult.html', context)
        elif request.POST['submit'] == 'Text Display':
            '''
            This block performs the text detection and gets the location of the detected text in the selected images
            '''
            #features represents the operation that needs to be performed on the images
            features = '5:10'
            #selectedImages contains the list of images selected by the user
            selectedImages = request.POST.getlist('miasimages')
            #result contains the detected text locations in each image
            result = []
            #This statement performs the text detection operation on the images
            for image in selectedImages:
                #response contains the image data from the google cloud storage bucket
                response = requests.get(image)
                #selectedImage contains the data of the image in base64 format
                selectedImage = base64.b64encode(BytesIO(response.content).getvalue()).decode()
                #text contains the text detected in the image, if any
                text = __generate_json(selectedImage, features)
                im = Image.open(BytesIO(response.content))
                draw = ImageDraw.Draw(im)
                #This statement gets the detected text locations in the image and draws rectangular boxes around them
                if 'textAnnotations' in text:
                    for index, ttext in enumerate(text['textAnnotations'], start=0):
                        #index 0 is the full text block, so only the individual words are boxed
                        if index != 0:
                            #vertices contains the corners of the bounding box for the detected text
                            vertices = ttext['boundingPoly']['vertices']
                            x = vertices[0]['x']
                            y = vertices[0]['y']
                            xh = vertices[1]['x']
                            yh = vertices[2]['y']
                            cor = (x, y, xh, yh)
                            #The rectangle is redrawn three times, offset by one pixel, to thicken the outline
                            for i in range(3):
                                draw.rectangle(cor, outline="red")
                                cor = (cor[0] + 1, cor[1] + 1, cor[2] + 1, cor[3] + 1)
                    inMemory = BytesIO()
                    im.save(inMemory, format='jpeg')
                    inMemory.seek(0)
                    imgBytes = inMemory.read()
                    #decode so the template receives a str rather than bytes
                    b64Image = base64.b64encode(imgBytes).decode()
                    result.append({
                        'originalImage': image,
                        'modifiedImage': b64Image
                    })
                else:
                    result.append({
                        'originalImage': image,
                        'modifiedImage': '0'
                    })
            #context contains the data to be passed to the HTML page for the post request
            context = {
                'loggedIn': True,
                'operation': 'Text Display',
                'result': result,
            }
            #This statement returns the response as an HTML page along with the data to be displayed in it
            return render(request, 'detectionresult.html', context)
        elif request.POST['submit'] == 'Logo Display':
            '''
            This block performs the logo detection and gets the location of the detected logo in the selected images
            '''
            #features represents the operation that needs to be performed on the images
            features = '3:10'
            #selectedImages contains the list of images selected by the user
            selectedImages = request.POST.getlist('miasimages')
            #result contains the detected logo in each image
            result = []
            #This statement performs the logo detection operation on the images
            for image in selectedImages:
                #response contains the image data from the google cloud storage bucket
                response = requests.get(image)
                #selectedImage contains the data of the image in base64 format
                selectedImage = base64.b64encode(BytesIO(response.content).getvalue()).decode()
                #t contains the logo detected in the image, if any
                t = __generate_json(selectedImage, features)
                #This statement gets the detected logo location in the image and draws a rectangular box around it
                if 'logoAnnotations' in t:
                    t = t['logoAnnotations'][0]
                    #vertices contains the corners of the bounding box for the detected logo
                    vertices = t['boundingPoly']['vertices']
                    x = vertices[0]['x']
                    y = vertices[0]['y']
                    xh = vertices[1]['x']
                    yh = vertices[2]['y']
                    im = Image.open(BytesIO(response.content))
                    draw = ImageDraw.Draw(im)
                    cor = (x, y, xh, yh)
                    #The rectangle is redrawn five times, offset by one pixel, to thicken the outline
                    for i in range(5):
                        draw.rectangle(cor, outline='red')
                        cor = (cor[0] + 1, cor[1] + 1, cor[2] + 1, cor[3] + 1)
                    inMemory = BytesIO()
                    im.save(inMemory, format='png')
                    inMemory.seek(0)
                    imgBytes = inMemory.read()
                    #decode so the template receives a str rather than bytes
                    b64Image = base64.b64encode(imgBytes).decode()
                    result.append({
                        'originalImage': image,
                        'modifiedImage': b64Image
                    })
                else:
                    result.append({
                        'originalImage': image,
                        'modifiedImage': '0'
                    })
            #context contains the data to be passed to the HTML page for the post request
            context = {
                'loggedIn': True,
                'operation': 'Logo Display',
                'result': result,
            }
            #This statement returns the response as an HTML page along with the data to be displayed in it
            return render(request, 'detectionresult.html', context)
        elif request.POST['submit'] == 'Label Detection':
            '''
            This block performs the label detection in the selected images
            '''
            #features represents the operation that needs to be performed on the images
            features = '4:10'
            #selectedImages contains the list of images selected by the user
            selectedImages = request.POST.getlist('miasimages')
            #result contains the detected labels in each image
            result = []
            #This statement performs the label detection operation on the images
            for image in selectedImages:
                #labelDetectionResult contains the list of detected labels in the image
                labelDetectionResult = []
                #response contains the image data from the google cloud storage bucket
                response = requests.get(image)
                #selectedImage contains the data of the image in base64 format
                selectedImage = base64.b64encode(BytesIO(response.content).getvalue()).decode()
                #__label contains the labels detected in the image, if any
                __label = __generate_json(selectedImage, features)
                #This statement extracts the labels in the image
                if 'labelAnnotations' in __label:
                    for index, tlabel in enumerate(__label['labelAnnotations'], start=0):
                        labelDetectionResult.append({
                            'labelDescription': str(tlabel['description']),
                            'percent': str(round(tlabel['score'] * 100))
                        })
                    result.append({
                        'imageUrl': image,
                        'labelDetectionResult': labelDetectionResult
                    })
                else:
                    labelDetectionResult.append({
                        'labelDescription': 'No label in the image',
                        'percent': '0'
                    })
                    result.append({
                        'imageUrl': image,
                        'labelDetectionResult': labelDetectionResult
                    })
            #context contains the data to be passed to the HTML page for the post request
            context = {
                'loggedIn': True,
                'operation': 'Label Detection',
                'result': result,
            }
            #This statement returns the response as an HTML page along with the data to be displayed in it
            return render(request, 'detectionresult.html', context)
        elif request.POST['submit'] == 'Automatic Annotation':
            '''
            This block performs the text, logo and label detections in the selected images in a single request
            '''
            #features represents the operations that need to be performed on the images
            features = '3:10 4:10 5:10'
            #selectedImages contains the list of images selected by the user
            selectedImages = request.POST.getlist('miasimages')
            #result contains the detected annotations for each image
            result = []
            #This statement performs the text, label and logo detection operations on the images
            for image in selectedImages:
                resultDictionary = {
                    'imageUrl': '',
                    'fileName': '',
                    'mediaType': '',
                    'advertise': None,
                    'logo': '',
                    'logoPercent': '',
                    'headLine': '',
                    'text': ''
                }
                resultLabelDictionary = []
                resultDictionary['imageUrl'] = image
                response = requests.get(image)
                selectedImage = base64.b64encode(BytesIO(response.content).getvalue()).decode()
                __resp = __generate_json(selectedImage, features)
                resultDictionary['fileName'] = image.split('/')[4]
                resultDictionary['mediaType'] = 'Image'
                resultDictionary['headLine'] = 'N/A'
                #This statement extracts the detected text in the image, if any
                if 'textAnnotations' in __resp:
                    for index, ttext in enumerate(__resp['textAnnotations'], start=0):
                        #index 0 holds the full detected text block
                        if index == 0:
                            resultDictionary['text'] = str(ttext['description'])
                else:
                    resultDictionary['text'] = 'No Text in the image'
                #This statement extracts the detected labels in the image, if any
                if 'labelAnnotations' in __resp:
                    for index, tlabel in enumerate(__resp['labelAnnotations'], start=0):
                        resultLabelDictionary.append({
                            'labelDescription': str(tlabel['description']),
                            'labelPercent': str(round(tlabel['score'] * 100))
                        })
                    resultDictionary['advertise'] = resultLabelDictionary
                else:
                    resultDictionary['advertise'] = 'No label in the image'
                #This statement extracts the detected logo in the image, if any
                if 'logoAnnotations' in __resp:
                    for index, tlogo in enumerate(__resp['logoAnnotations'], start=0):
                        resultDictionary['logo'] = tlogo['description']
                        resultDictionary['logoPercent'] = str(round(tlogo['score'] * 100))
                else:
                    resultDictionary['logo'] = 'No logo in the image'
                    resultDictionary['logoPercent'] = '0'
                result.append(resultDictionary)
            #context contains the data to be passed to the HTML page for the post request
            context = {
                'loggedIn': True,
                'operation': 'Automatic Annotation',
                'result': result,
            }
            return render(request, 'detectionresult.html', context)
        else:
            #context contains the data to be passed to the HTML page for the post request
            context = {
                'loggedIn': True,
            }
            #This statement returns the response as an HTML page along with the data to be displayed in it
            return render(request, 'detectionresult.html', context)

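#NOTE: __generate_json is defined elsewhere in this module. The sketch below is only an
#illustration of what such a helper might look like, assuming it wraps the Cloud Vision
#REST endpoint (images:annotate) and that the features string encodes
#'<feature type number>:<max results>' pairs (3 = LOGO_DETECTION, 4 = LABEL_DETECTION,
#5 = TEXT_DETECTION, matching the values used above). The API key setting name is an
#assumption, not the project's actual configuration.
def __generate_json_sketch(imageContent, features):
    #featureNames maps the numeric feature codes used in the views to Vision API feature types
    featureNames = {3: 'LOGO_DETECTION', 4: 'LABEL_DETECTION', 5: 'TEXT_DETECTION'}
    featureList = []
    for token in features.split():
        ftype, maxResults = token.split(':')
        featureList.append({'type': featureNames[int(ftype)], 'maxResults': int(maxResults)})
    #body follows the images:annotate request format: base64 content plus the requested features
    body = {'requests': [{'image': {'content': imageContent}, 'features': featureList}]}
    #settings.VISION_API_KEY is a placeholder for however the project authenticates
    url = 'https://vision.googleapis.com/v1/images:annotate?key=' + settings.VISION_API_KEY
    #the views index the result directly (e.g. __text['textAnnotations']), so the helper
    #presumably returns the first element of the 'responses' list
    return requests.post(url, json=body).json()['responses'][0]
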
def imageSearchHome(request):
    global objects
    if request.method == 'GET':
        try:
            loggedInUser = request.session['uEmail']
        except:
            return redirect('/welcome')
        response = storage.list_buckets(settings.CLOUD_PROJECT_ID)
        #This statement gets only the buckets whose name starts with datacore
        for lstbct in response['items']:
            if lstbct['name'].startswith('datacore'):
                #blobs contains the images in the root folder of each bucket
                blobs = []
                #respobject contains the list of all the images in the bucket
                respobject = storage.list_objects(lstbct['name'])
                #This statement keeps only the images in the root folder of each bucket
                for index, bct in enumerate(respobject):
                    if 'segment/' in str(bct['name']):
                        pass
                    else:
                        blobs.append({
                            'index': index,
                            'name': str(bct['name']),
                            'public_url': 'https://storage.googleapis.com/' + lstbct['name'] + '/' + str(bct['name']),
                            'timecreated': datetime.strptime(str(bct['timeCreated']), '%Y-%m-%dT%H:%M:%S.%fZ'),
                            'type': str(bct['contentType'])
                        })
                objects.update({lstbct['name']: blobs})
        #context contains the data to be passed to the HTML page for the get request
        context = {
            'loggedIn': True,
            'operation': 'get',
            'objects': objects,
        }
        #This statement returns the response as an HTML page along with the data to be displayed in it
        return render(request, 'imageSearch.html', context)
    else:
        if request.POST['submit'] == 'Image Search':
            #totalSelectedImages contains the list of images selected by the user
            totalSelectedImages = request.POST.getlist('miasImages')
            #client performs the annotation operations on the selected images
            client = ImageAnnotatorClient()
            image_details = []
            #image_requests contains the image information (image url plus the features to be
            #detected) for each annotation request
            image_requests = []
            #image_features contains the list of operations that need to be performed on the image
            image_features = [{'type': DETECTION_TYPES[3]},
                              {'type': DETECTION_TYPES[4]},
                              {'type': DETECTION_TYPES[5]}]
            #total_images counts the total number of images selected by the user
            total_images = 0
            #image_histograms contains the histogram information for the selected images
            image_histograms = []
            #This block gets the histogram information as well as the image information
            #for the selected images
            for eachImage in totalSelectedImages:
                image_requests.append({
                    'image': {'source': {'image_uri': eachImage.split('+')[1]}},
                    'features': image_features,
                })
                #image_content contains the image data from the url
                image_content = requests.get(eachImage.split('+')[1])
                image_array = np.asarray(bytearray(BytesIO(image_content.content).getvalue()), dtype='uint8')
                converted_image_array = cv2.imdecode(image_array, cv2.IMREAD_COLOR)
                individual_image_histogram = []
                if converted_image_array.shape[2] != 3:
                    #one 256-bin histogram per colour channel
                    for each_channel in range(0, 3):
                        if each_channel == 0:
                            individual_image_histogram.append({
                                'redChannel': cv2.calcHist([converted_image_array], [0], None, [256], [0, 256]).tolist()
                            })
                        elif each_channel == 1:
                            individual_image_histogram.append({
                                'greenChannel': cv2.calcHist([converted_image_array], [1], None, [256], [0, 256]).tolist()
                            })
                        else:
                            individual_image_histogram.append({
                                'blueChannel': cv2.calcHist([converted_image_array], [2], None, [256], [0, 256]).tolist()
                            })
                else:
                    #single 256-bin histogram over channel 0
                    individual_image_histogram.append({
                        'singleChannel': cv2.calcHist([converted_image_array], [0], None, [256], [0, 256]).tolist()
                    })
                image_histograms.append({'index': total_images, 'histogram': individual_image_histogram})
                total_images = total_images + 1
            image_response = client.batch_annotate_images(image_requests)
            image_labels = []
            image_logos = []
            for index, eachResponse in enumerate(image_response.responses):
                if len(image_response.responses[index].label_annotations) != 0:
                    labels = []
                    for eachLabel in range(len(image_response.responses[index].label_annotations)):
                        labels.append({
                            'labelDescription': image_response.responses[index].label_annotations[eachLabel].description,
                            'labelPercent': str(round(image_response.responses[index].label_annotations[eachLabel].score * 100)),
                        })
                    image_labels.append({'index': index, 'labels': labels})
                else:
                    image_labels.append({'index': index, 'labels': []})
            for index, eachResponse in enumerate(image_response.responses):
                if len(image_response.responses[index].logo_annotations) != 0:
                    logos = []
                    for eachLogo in range(len(image_response.responses[index].logo_annotations)):
                        logos.append({
                            'logoDescription': image_response.responses[index].logo_annotations[eachLogo].description,
                            'logoPercent': str(round(image_response.responses[index].logo_annotations[eachLogo].score * 100)),
                        })
                    image_logos.append({'index': index, 'logos': logos})
                else:
                    image_logos.append({'index': index, 'logos': []})
            #datastore_client contains methods for accessing GCP's datastore
            datastore_client = datastore.Client()
            #This block creates an entity in the datastore kind MIASJSON for storing image
            #information in JSON format
            for index, eachImage in enumerate(totalSelectedImages, 0):
                #key creates a key for the kind MIASJSON in the datastore
                key = datastore_client.key('MIASJSON', eachImage.split('+')[0])
                #entity creates an entity in the MIASJSON kind in the datastore
                entity = datastore.entity.Entity(key, ('image_info', 'text_info',
                                                       'label_info', 'logo_info',
                                                       'histogram_info'))
                #image_info stores image data such as the image name and its uri
                entity['image_info'] = json.dumps({
                    'image_name': eachImage.split('+')[0],
                    'image_url': eachImage.split('+')[1],
                })
                #text_info stores text data if available in the image
                entity['text_info'] = json.dumps({
                    'text_data': image_response.responses[index].text_annotations[0].description
                    if (len(image_response.responses[index].text_annotations) != 0)
                    else 'No text in the image',
                })
                #label_info stores label data if available in the image
                entity['label_info'] = json.dumps({
                    'label_data': 'No label in the image'
                    if (len(image_labels[index]['labels']) == 0)
                    else image_labels[index]['labels'],
                })
                #logo_info stores logo data if available in the image
                entity['logo_info'] = json.dumps({
                    'logo_data': 'No logo in the image'
                    if (len(image_logos[index]['logos']) == 0)
                    else image_logos[index]['logos'],
                })
                #histogram_info stores the histogram data for the image
                entity['histogram_info'] = json.dumps({'histogram_data': image_histograms[index]['histogram']})
                datastore_client.put(entity)
            #context contains the data to be passed to the HTML page for the post request
            context = {
                'loggedIn': True,
                'operation': 'ImageSearch',
                'objects': objects,
                'result': True,
            }
            #This statement returns the response as an HTML page along with the data to be displayed in it
            return render(request, 'imageSearch.html', context)
        elif request.POST['submit'] == 'Similarity':
            #totalSelectedImages contains the list of images selected by the user
            totalSelectedImages = request.POST.getlist('miasImages')
            ed_result = []
            for eachImage in totalSelectedImages:
                image_content = requests.get(eachImage.split('+')[1])
                image_array = np.asarray(bytearray(BytesIO(image_content.content).getvalue()), dtype='uint8')
                converted_image_array = cv2.imdecode(image_array, cv2.IMREAD_COLOR)
                selected_image_histogram = cv2.calcHist([converted_image_array], [0], None, [256], [0, 256])
                #datastore_client contains methods for accessing GCP's datastore
                datastore_client = datastore.Client()
                datastore_query = datastore_client.query(kind='MIASJSON')
                result = list(datastore_query.fetch())
                similarity_result = []
                for eachResult in result:
                    individual_result = json.loads(dict(eachResult)['histogram_info'])
                    histogram_data = np.array(individual_result['histogram_data'][0]['singleChannel'])
                    #euclidean_distance measures how far the stored histogram is from the selected image's histogram
                    euclidean_distance = np.sqrt(np.sum((selected_image_histogram - histogram_data) ** 2))
                    similarity_result.append({'image_name': eachResult.key.name,
                                              'distance': euclidean_distance})
                ed_result.append({'image_name': eachImage.split('+')[0],
                                  'similarity_result': similarity_result})
            #context contains the data to be passed to the HTML page for the post request
            context = {
                'loggedIn': True,
                'operation': 'Similarity',
                'objects': objects,
                'ed_result': ed_result,
                'result': True,
            }
            #This statement returns the response as an HTML page along with the data to be displayed in it
            return render(request, 'imageSearch.html', context)

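#The Similarity branch above ranks stored images by the Euclidean distance between
#256-bin channel-0 histograms. A minimal, self-contained sketch of that comparison
#(assuming both images have already been decoded with cv2.imdecode) is shown below;
#smaller distances mean more similar intensity distributions.
def _histogram_distance_sketch(image_a, image_b):
    #calcHist produces a 256x1 array of pixel counts for channel 0 of each image
    hist_a = cv2.calcHist([image_a], [0], None, [256], [0, 256])
    hist_b = cv2.calcHist([image_b], [0], None, [256], [0, 256])
    #Euclidean distance between the two histograms, as computed in the Similarity branch
    return float(np.sqrt(np.sum((hist_a - hist_b) ** 2)))
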
def segmentationHome(request):
    global result
    if request.method == 'GET':
        '''
        This block returns the response for the get request for the /segmentation url
        '''
        try:
            #loggedInUser gets the email of the logged in user
            loggedInUser = request.session['uEmail']
        except:
            #This statement redirects the user if he/she has not logged in
            return redirect('/welcome')
        #response contains the list of buckets corresponding to the cloud project ID
        response = storage.list_buckets(settings.CLOUD_PROJECT_ID)
        #objects contains only the list of buckets whose name starts with datacore
        objects = {}
        #This statement extracts the buckets whose name starts with datacore and their blobs
        for lstbct in response['items']:
            if lstbct['name'].startswith('datacore'):
                #blobs contains the list of objects (images) in each bucket
                blobs = []
                respobject = storage.list_objects(lstbct['name'])
                filteredImageName = None
                #This statement extracts the images in the root folder of each bucket
                for index, bct in enumerate(respobject):
                    if 'segment/' in str(bct['name']):
                        pass
                    else:
                        filteredImageName = str(bct['name'])
                        blobs.append({
                            'index': index,
                            'name': str(bct['name']),
                            'filteredName': filteredImageName,
                            'public_url': 'https://storage.googleapis.com/' + lstbct['name'] + '/' + str(bct['name']),
                            'timecreated': datetime.strptime(str(bct['timeCreated']), '%Y-%m-%dT%H:%M:%S.%fZ'),
                            'type': str(bct['contentType'])
                        })
                objects.update({lstbct['name']: blobs})
        #context contains the data to be passed to the HTML page for the get request
        context = {
            'loggedIn': True,
            'objects': objects,
        }
        #This statement returns the response as an HTML page along with the data to be displayed in it
        return render(request, 'segmentationHome.html', context)
    else:
        '''
        This block returns the response for the post request for the /segmentation url
        '''
        if request.POST['submit'] == 'AD Segment':
            '''
            This block performs the segmentation in the image
            '''
            #selectedImages contains the list of images selected by the user
            selectedImages = request.POST.getlist('miasimages')
            result = []
            #threads contains the list of threads being created
            threads = []
            #initThreadsCnt gives the total number of threads currently running
            initThreadsCnt = threading.active_count()
            sensitivity_type = request.POST['Sensitivity']
            # sensitivity_point = int(request.POST['sPoint'])
            print("Initial Thread cnt => " + str(initThreadsCnt))
            #One worker thread is started per selected image
            for index, image in enumerate(selectedImages):
                t = threading.Thread(target=onAdSegment, args=(image, index))
                threads.append(t)
                t.start()
            cnt = 0
            print("After Thread cnt => " + str(threading.active_count()))
            #This loop waits (for at most roughly 20 seconds) until the worker threads finish
            while initThreadsCnt != threading.active_count():
                cnt += 1
                time.sleep(1)
                if cnt > 20:
                    break
            #context contains the data to be passed to the HTML page for the post request
            context = {
                'loggedIn': True,
                'result': result,
            }
            #This statement returns the response as an HTML page along with the data to be displayed in it
            return render(request, 'segmentationResult.html', context)
        elif request.POST['submit'] == 'Contour Segment':
            '''
            This block performs the contour segmentation in the image
            '''
            selectedImages = request.POST.getlist('miasimages')
            result = []
            for image in selectedImages:
                response = requests.get(image.split('+')[0])
                selectedImage = base64.b64encode(BytesIO(response.content).getvalue())
                img = np.asarray(bytearray(BytesIO(response.content).getvalue()), dtype='uint8')
                convertedImage = cv2.imdecode(img, cv2.IMREAD_COLOR)
                gray = cv2.cvtColor(convertedImage, cv2.COLOR_BGR2GRAY)
                edges = cv2.Canny(gray, 10, 100, apertureSize=3)
                _, contours, _ = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
                #Only contours with an area above 1000 pixels are drawn onto the image
                for i, c in enumerate(contours):
                    area = cv2.contourArea(c)
                    if area > 1000:
                        cv2.drawContours(convertedImage, contours, i, (255, 0, 0), 3)
                im = Image.fromarray(cv2.cvtColor(convertedImage, cv2.COLOR_BGR2RGB))
                inMemory = BytesIO()
                im.save(inMemory, format='jpeg')
                inMemory.seek(0)
                imgBytes = inMemory.read()
                #decode so the template receives a str rather than bytes
                b64Image = base64.b64encode(imgBytes).decode()
                result.append({
                    'inputImage': image.split('+')[0],
                    'resultImage': b64Image
                })
            #context contains the data to be passed to the HTML page for the post request
            context = {
                'loggedIn': True,
                'result': result,
            }
            #This statement returns the response as an HTML page along with the data to be displayed in it
            return render(request, 'segmentationResult.html', context)
        else:
            '''
            This block performs the Hough lines detection in the image
            '''
            #selectedImages contains the list of images selected by the user
            selectedImages = request.POST.getlist('miasimages')
            result = []
            for image in selectedImages:
                response = requests.get(image.split('+')[0])
                selectedImage = base64.b64encode(BytesIO(response.content).getvalue())
                img = np.asarray(bytearray(BytesIO(response.content).getvalue()), dtype='uint8')
                convertedImage = cv2.imdecode(img, cv2.IMREAD_COLOR)
                gray = cv2.cvtColor(convertedImage, cv2.COLOR_BGR2GRAY)
                edges = cv2.Canny(gray, 10, 100, apertureSize=3)
                lines1 = cv2.HoughLinesP(edges, 1, np.pi, threshold=100, minLineLength=100, maxLineGap=1)
                for x in range(0, len(lines1)):
                    for x1, y1, x2, y2 in lines1[x]:
                        cv2.line(convertedImage, (x1, y1), (x2, y2), (255, 0, 0), 3)
                lines2 = cv2.HoughLinesP(edges, 1, np.pi / 2, threshold=100, minLineLength=100, maxLineGap=1)
                for x in range(0, len(lines2)):
                    for x1, y1, x2, y2 in lines2[x]:
                        cv2.line(convertedImage, (x1, y1), (x2, y2), (255, 0, 0), 3)
                im = Image.fromarray(cv2.cvtColor(convertedImage, cv2.COLOR_BGR2RGB))
                inMemory = BytesIO()
                im.save(inMemory, format='jpeg')
                inMemory.seek(0)
                imgBytes = inMemory.read()
                #decode so the template receives a str rather than bytes
                b64Image = base64.b64encode(imgBytes).decode()
                result.append({
                    'inputImage': image.split('+')[0],
                    'resultImage': b64Image
                })
            #context contains the data to be passed to the HTML page for the post request
            context = {
                'loggedIn': True,
                'result': result,
            }
            #This statement returns the response as an HTML page along with the data to be displayed in it
            return render(request, 'segmentationResult.html', context)

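#The 'AD Segment' branch above waits for its worker threads by polling
#threading.active_count() with a 20-second cap, which also counts threads unrelated to
#this request. A sketch of the more direct alternative, joining each thread with a
#per-thread timeout, is shown below; 'threads' is assumed to be the same list that
#branch builds, and the sketch is not part of the existing view code.
def _wait_for_segment_threads_sketch(threads, timeout_per_thread=20):
    #join() blocks until the thread finishes or the timeout expires
    for t in threads:
        t.join(timeout=timeout_per_thread)
    #any thread still alive after its timeout is reported instead of raising, mirroring
    #the existing behaviour of giving up after the cap
    return [t.name for t in threads if t.is_alive()]
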
def imageCropHome(request):
    global adImages
    global nonAdImages
    global objects
    if request.method == 'GET':
        '''
        This block returns the response for the get request
        '''
        try:
            #loggedInUser contains the email of the user if he/she has logged in
            loggedInUser = request.session['uEmail']
        except:
            #This statement redirects the user to the welcome page if he/she has not logged in
            return redirect('/welcome')
        #response contains the list of all the buckets for the selected project
        response = storage.list_buckets(settings.CLOUD_PROJECT_ID)
        #This statement gets only the buckets whose name starts with datacore
        for lstbct in response['items']:
            if lstbct['name'].startswith('datacore'):
                #blobs contains the cropped images and the images in the root folder of each bucket
                blobs = []
                #respobject contains the list of all the images in the bucket
                respobject = storage.list_objects(lstbct['name'])
                #This statement keeps only the cropped images and the images in the root folder of each bucket
                for index, bct in enumerate(respobject):
                    #This statement gets the cropped images in the bucket
                    if 'segment/ad/cropped' in str(bct['name']):
                        blobs.append({
                            'index': index,
                            'name': str(bct['name']),
                            'public_url': 'https://storage.googleapis.com/' + lstbct['name'] + '/' + str(bct['name']),
                            'timecreated': datetime.strptime(str(bct['timeCreated']), '%Y-%m-%dT%H:%M:%S.%fZ'),
                            'type': str(bct['contentType'])
                        })
                    else:
                        #This statement gets the images in the root folder of the bucket
                        if 'segment/' in str(bct['name']):
                            pass
                        else:
                            blobs.append({
                                'index': index,
                                'name': str(bct['name']),
                                'public_url': 'https://storage.googleapis.com/' + lstbct['name'] + '/' + str(bct['name']),
                                'timecreated': datetime.strptime(str(bct['timeCreated']), '%Y-%m-%dT%H:%M:%S.%fZ'),
                                'type': str(bct['contentType'])
                            })
                objects.update({lstbct['name']: blobs})
        #context contains the data to be passed to the HTML page for the get request
        context = {
            'loggedIn': True,
            'operation': 'get',
            'objects': objects,
        }
        #This statement returns the response as an HTML page along with the data to be displayed in it
        return render(request, 'imageCropping.html', context)
    else:
        if request.POST['submit'] == 'Perform Image Crop':
            '''
            This block returns the data for the selected images to perform cropping
            '''
            #totalSelectedImages contains the list of images selected by the user
            totalSelectedImages = []
            #selectedCropImages contains the list of images to be cropped by the user
            selectedCropImages = request.POST.getlist('miasCropImages')
            count = 0
            #This statement collects the data for the selected images for cropping
            for selectedCropImage in selectedCropImages:
                #responseCrop gets the data for the selected image from google cloud storage for cropping
                responseCrop = requests.get(selectedCropImage.split('+')[1])
                #CropImage contains the selected image data in base64 format
                CropImage = base64.b64encode(BytesIO(responseCrop.content).getvalue()).decode()
                #imageName creates the name for the cropped image
                imageName = 'segment/ad/' + 'cropped_' + selectedCropImage.split('+')[0]
                totalSelectedImages.append({
                    'imageName': imageName,
                    'imageData': CropImage
                })
                count += 1
            #context contains the data to be passed to the HTML page for the post request
            context = {
                'loggedIn': True,
                'count': count,
                'operation': 'Crop',
                'objects': objects,
                'selectedImages': totalSelectedImages,
                'selectedBucket': selectedCropImage.split('+')[2],
            }
            #This statement returns the response as an HTML page along with the data to be displayed in it
            return render(request, 'imageCropping.html', context)
        if request.POST['submit'] == 'Save':
            '''
            This block saves the cropped images in the respective bucket
            '''
            miasImagesStorage = gStorage.Client()
            #miasBucket contains the bucket selected by the user
            miasBucket = miasImagesStorage.get_bucket(request.POST['selectedBucket'])
            #totalCroppedImage contains the total number of images cropped by the user
            totalCroppedImage = int(request.POST['totalCount'])
            #This loop saves the cropped images in the respective folder
            for croppedImages in range(totalCroppedImage):
                try:
                    #imageName contains the name of the cropped image
                    imageName = request.POST['imageNameHidden_' + str(croppedImages)]
                    #This branch saves the cropped image in the ad folder of the bucket
                    if 'segment/ad' in imageName:
                        #croppedImageData contains the data for the cropped image
                        croppedImageData = request.POST['imageDataHidden_' + str(croppedImages)]
                        #b64CroppedImageData strips the leading 'data:image/png;base64,' prefix
                        #(22 characters) to leave only the base64 payload
                        b64CroppedImageData = croppedImageData[22:]
                        #blob creates an object for the cropped image in the google cloud storage bucket
                        blob = miasBucket.blob(imageName)
                        #This statement uploads the decoded image data for the cropped image to the
                        #created object in the google cloud storage bucket
                        blob.upload_from_string(
                            base64.b64decode(b64CroppedImageData),
                            ('image/' + imageName.split('.')[1]))
                    else:
                        #This branch saves the cropped image outside the ad folder of the bucket
                        #croppedImageData contains the data for the cropped image
                        croppedImageData = request.POST['imageDataHidden_' + str(croppedImages)]
                        #b64CroppedImageData strips the leading data URL prefix (22 characters)
                        b64CroppedImageData = croppedImageData[22:]
                        #blob creates an object for the cropped image in the google cloud storage bucket
                        blob = miasBucket.blob(imageName)
                        #This statement uploads the decoded image data for the cropped image to the
                        #created object in the google cloud storage bucket
                        blob.upload_from_string(
                            base64.b64decode(b64CroppedImageData),
                            ('image/' + imageName.split('.')[1]))
                except:
                    pass
            #context contains the data to be passed to the HTML page for the post request
            context = {
                'loggedIn': True,
                'operation': 'Save',
                'objects': objects,
            }
            #This statement returns the response as an HTML page along with the data to be displayed in it
            return render(request, 'imageCropping.html', context)

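#The 'Save' branch above strips the data-URL header with a fixed croppedImageData[22:]
#slice, which assumes a 'data:image/png;base64,' prefix (exactly 22 characters). A
#sketch of a prefix-length-independent variant, splitting on the first comma instead,
#is shown below; the bucket and parameter names mirror the ones used above and the
#helper itself is illustrative, not part of the existing views.
def _upload_cropped_image_sketch(miasBucket, imageName, croppedImageData):
    #data URLs have the form 'data:<mime type>;base64,<payload>'; everything after the
    #first comma is the base64 payload regardless of the mime type's length
    b64CroppedImageData = croppedImageData.split(',', 1)[1]
    blob = miasBucket.blob(imageName)
    #the content type is derived from the file extension, as in the Save branch
    blob.upload_from_string(base64.b64decode(b64CroppedImageData),
                            'image/' + imageName.split('.')[1])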