def __init__(self):
    self.image_processor = ImageProcessor(crop_face=CROP_FACES)
    self.model_manager = ModelManager()
    # initialize volatile variables
    self.prediction_model = MobilenetPredictor()
    self.personal_trainer = PersonalTrainer()
    self.sessions = {}
def __init__(self, root, im_file, masks_dir, is_debug=True):
    ImageProcessor.__init__(self, root, im_file, masks_dir)
    # keep green channel
    self._image = self.image[:, :, 1].copy()
    self._norm_const = 2.45
    self._is_debug = is_debug
def collect_samples():
    cap = cv2.VideoCapture(0)
    img_process = ImageProcessor()
    count = -1
    frames = []
    while True:
        print(f"Count = {count}")
        _, frame = cap.read()
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        frame = img_process.horizontal_flip(frame)
        cv2.imshow('Frame', frame)
        key = cv2.waitKey(1)
        if key == ord('s'):
            count = 1
        if count > 0 and count <= SAMPLES_TO_COLLECT:
            frames.append(frame)
            count += 1
        elif count > SAMPLES_TO_COLLECT:
            break
        if key & 0xFF == ord('q'):
            break
    # step through the collected frames; wrap around at the end and quit on 'q'
    ind = 0
    while frames:
        cv2.imshow('Part', frames[ind])
        key = cv2.waitKey(1)
        if key == ord('s'):
            ind = (ind + 1) % len(frames)
        if key & 0xFF == ord('q'):
            break
def process_images(params):
    img_path = params
    processor = ImageProcessor(os.path.join(args.data_dir, 'images'),
                               os.path.join(args.data_dir, 'masks'),
                               CLASSES,
                               overlap=args.overlap,
                               window_size=args.window_size)
    return processor.process_image(img_path)
def test_init(self):
    """Just after instantiating the class, no matcher is set, so invoking
    the matcher method should raise an exception."""
    processor = ImageProcessor()
    with pytest.raises(Exception):
        processor.matcher()
def setupWindow():
    filename = getUserSelectedImage()
    imageProcessor = ImageProcessor(cv2.imread(filename, 0))
    colourImage = cv2.imread(filename, 1)
    image = imageProcessor.getThresholdedImage(False)
    granularity = imageProcessor.get_granularity(image, 100)
    print("Granularity: {0}".format(granularity))
    start_x, start_y, end_x, end_y = get_start_points(image)
    image = imageProcessor.encloseMaze(image)
    pg = PolicyGenerator(image)
    rows, cols = pg.get_critical_grid()
    graph, mapping = pg.get_reduced_graph([rows, cols])
    policy = pg.generate_policy((end_x, end_y))
    solution = solve_using_policy(policy, (start_x, start_y), (end_x, end_y))
    imageProcessor.draw_policy(colourImage, policy)
    imageProcessor.mark_point((end_x, end_y), 3, (255, 0, 0), colourImage)
    # cv2.imshow(MAZE_NAME, policy_image)
    mazerunner = MazeSolver(image, granularity)
    # solution = mazerunner.solveMaze(start_x, start_y, end_x, end_y)
    if not solution:
        cv2.imshow(MAZE_NAME, image)
    else:
        solvedImage = draw_solution(solution, colourImage)
        solvedImage = imageProcessor.mark_point((start_x, start_y), 3, (255, 0, 0), solvedImage)
        window = cv2.namedWindow("Solved Image", cv2.WINDOW_NORMAL)
        cv2.resizeWindow("Solved Image", 900, 900)
        cv2.moveWindow("Solved Image", 100, 100)
        cv2.imshow("Solved Image", solvedImage)
    print("Press any key to exit")
    cv2.waitKey(0)
    cv2.destroyAllWindows()
async def logo_detection(files: List[UploadFile] = File(...)):
    response = None
    try:
        image_processor = ImageProcessor()
        # serialize the detection result for the uploaded file as JSON
        data = json.dumps(image_processor.detect_logos(files[0]))
        response = Response(content=data, media_type="application/json")
    except Exception as error:
        logging.error("Error while uploading file: %s", error)
        response = {"Error": str(error)}
    return response
def post(self):
    datafile = self.request.files['webcam'][0]
    image = ImageProcessor(datafile['body'])
    image.process()
    return self.write_json({'success': True,
                            'text': image.text,
                            'pan_no': image.pan_no})
def image_grayscale_normalize(self):
    _x_train = []
    _x_test = []
    for img in self.x_train:
        _x_train.append(ImageProcessor.grayscale_normalize(img))
    for img in self.x_test:
        _x_test.append(ImageProcessor.grayscale_normalize(img))
    self.x_train = np.array(_x_train)
    self.x_test = np.array(_x_test)
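# The snippet above calls ImageProcessor.grayscale_normalize as a static helper, but its
# implementation is not shown here. The sketch below is only a hedged illustration of the
# usual convert-then-scale pattern such a helper might follow; the real method may differ.
import cv2
import numpy as np


def grayscale_normalize(img):
    """Convert a BGR image to single-channel grayscale and scale pixel values to [0, 1]."""
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    return gray.astype(np.float32) / 255.0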
def test_live_effecient(square_dim=320):
    cap = cv2.VideoCapture(0)
    img_process = ImageProcessor()
    base_imgs = None
    test_max_black_pixel_count = 0
    drum_area1 = DrumArea(top_left_corner=(100, 10), square_dim=square_dim, sound='j')
    drum_area2 = DrumArea(top_left_corner=(100, 320), square_dim=square_dim, sound='c')
    drum_areas = [drum_area1, drum_area2]
    area_listener = AreaListener(drum_areas=drum_areas)
    last_states = [False for i in range(len(drum_areas))]
    max_black_pixel = [0 for i in range(len(drum_areas))]
    while True:
        _, frame_orig = cap.read()
        frame_orig = img_process.horizontal_flip(frame_orig)
        area_listener.draw_area(frame_orig)
        if base_imgs is None:
            area_listener.set_base_image(frame_orig)
            base_imgs = area_listener.get_base_imgs(resize_dim=RESIZE_DIM)
        target_areas = area_listener.get_all_target_areas(
            frame_orig, resize_dim=RESIZE_DIM)
        for i, target_area in enumerate(target_areas):
            diff = cv2.absdiff(target_area, base_imgs[i])
            diff_gray = np.asarray(cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY))
            if test_max_black_pixel_count < 100:
                # calibration phase: remember the brightest difference seen so far
                max_black_pixel[i] = max(max_black_pixel[i], max(diff_gray.flatten()))
            else:
                diff_gray[diff_gray > max_black_pixel[i]] = 255
                diff_gray[diff_gray <= max_black_pixel[i]] = 0
                num_whites = len(diff_gray[diff_gray == 255])
                if num_whites > THRESHOLD_NUM_WHITES:
                    if not last_states[i]:
                        last_states[i] = True
                        drum_areas[i].playSound()
                    drum_areas[i].markPlayed(frame_orig)
                else:
                    last_states[i] = False
                cv2.waitKey(1)
        test_max_black_pixel_count += 1
        cv2.imshow('Main', frame_orig)
        key = cv2.waitKey(1)
        if key & 0xFF == ord('q'):
            break
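# A hedged, standalone restatement of the per-area trigger used in the loop above: diff the
# current crop against its base image, threshold at the calibrated black level, and fire when
# enough pixels change. detect_hit is a hypothetical helper, not part of the original module.
import cv2
import numpy as np


def detect_hit(target_area, base_img, black_level, min_whites):
    """Return True when more than min_whites pixels differ from the base image
    after thresholding the absolute difference at black_level."""
    diff = cv2.absdiff(target_area, base_img)
    diff_gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
    num_whites = int(np.count_nonzero(diff_gray > black_level))
    return num_whites > min_whites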
def main():
    # Basically this is for testing
    ir = ImageReceiver("right_hand_camera")
    il = ImageReceiver("left_hand_camera")
    ih = ImageReceiver("head_camera")
    ip = ImageProcessor()  # need one of these per process
    rospy.init_node("ImageReceiver", anonymous=True)
    il.disableCamera()
    ih.disableCamera()
    ir.enableCamera()
    image = ir.getImage()
    ip.setImage(image)
    ip.findBlock("PURPLE")
def test_image_processor():
    ip = ImageProcessor()
    test_img_paths = os.listdir('test_images')
    for i, img in enumerate(os.listdir(TEST_DIR)):
        typ = ip.predict_type(os.path.join(TEST_DIR, img))
        result = "passed" if typ in ('Top', 'Bottom', 'Full-body') else "failed"
        print("Type prediction test %d %s" % ((i + 1), result))
    possible_attributes = get_list_of_attributes()
    for i, img in enumerate(os.listdir(TEST_DIR)):
        _, attrs = ip.predict_attributes(os.path.join(TEST_DIR, img))
        result = "passed" if all(a in possible_attributes for a in attrs) else "failed"
        print("Attribute prediction test %d %s" % ((i + 1), result))
def augment_data(self):
    data = {}
    for cls in np.unique(self.y_train):
        data[cls] = []
    for img, cls in zip(self.x_train, self.y_train):
        data[cls].append(img)
    _x_train = []
    _y_train = []
    for cls, imgs in data.items():
        c_length = len(imgs)
        _x_train.extend(imgs)
        _y_train.extend([cls] * c_length)
        idx, counter, target = 0, c_length, 1200
        while counter <= target:
            _x_train.append(ImageProcessor.random_transform(imgs[idx]))
            _y_train.append(cls)
            counter += 1
            # cycle through the class's images, wrapping back to the first one
            idx = idx + 1 if idx < c_length - 1 else 0
    self.x_train = np.array(_x_train)
    self.y_train = np.array(_y_train)
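# ImageProcessor.random_transform is used above to oversample each class to 1200 examples,
# but its body is not shown. This is a minimal sketch of the kind of jitter such a helper
# typically applies (a small shift plus an occasional flip); it is an assumption, not the
# original implementation.
import numpy as np


def random_transform(img, max_shift=2):
    """Shift the image by a few pixels in a random direction and flip it half the time."""
    dy, dx = np.random.randint(-max_shift, max_shift + 1, size=2)
    out = np.roll(img, shift=(dy, dx), axis=(0, 1))
    if np.random.rand() < 0.5:
        out = out[:, ::-1]  # horizontal flip
    return out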
def resize_selected(self):
    """Shrinks all of the selected images."""
    selected_items = self.get_selected_files()
    selected_files = []
    for index in selected_items:
        one = os.path.join(self.opened_path, self.opened_files[int(index)])
        selected_files.append(one)
    self.progressbar['maximum'] = len(selected_files)
    for num, one in enumerate(selected_files):
        ImageProcessor.shrink_2x(one)
        self.progressbar['value'] = num + 1
        self.root.update()
    self.read_files_in()
class Shwabe(object):
    def __init__(self):
        self.camera = Camera()
        self.processor = ImageProcessor()
        # self.mouse = Mouse(self.camera.shape[:2])
        self.mouse = Mouse((480, 640))
        self.brain = MainBrain()
        # self.mouse = Mouse(self.camera.get_camera_view()[:2])

    def main_loop(self):
        while True:
            input_frame = self.camera.get_frame()
            # print("===========", input_frame.shape)
            self.mouse.update_view_size(input_frame.shape)
            # self.processor.extract_morph_from_img(input_frame)
            # center = self.processor.draw_circle()
            # if center:
            #     self.mouse.move(center)
            # self.processor.draw_line()
            # self.processor.draw_windows()
            masked_image = self.processor.extract_mask(input_frame)
            self.brain.find_contours(masked_image)
            self.brain.find_defects_point()
            self.brain.draw_circle()
            # self.brain.draw_line()
            # self.brain.move_stats
            self.mouse.search_trigger(self.brain.move_stats,
                                      self.brain.move_cap,
                                      self.brain.movement_delta)
            self.brain.show_windows()
class Game:
    def __init__(self, width, height):
        pg.init()
        self.W = width
        self.H = height
        self.screen = pg.display.set_mode((self.W, self.H))
        self.running = True
        self.scl = 40
        self.cols = self.W // self.scl
        self.rows = self.H // self.scl
        self.grid = [[0 for i in range(self.cols)] for j in range(self.rows)]
        self.ip = ImageProcessor()
        self.model = ModelLoader('model/trained_model_2.pt').load_model()

    def draw_grid(self):
        black = 0, 0, 0
        for col in range(self.cols):
            var = (col * self.scl) - 1
            pg.draw.line(self.screen, black, (var, 0), (var, self.H), 1)
        for row in range(self.rows):
            var = (row * self.scl) - 1
            pg.draw.line(self.screen, black, (0, var), (self.W, var), 1)

    def draw_node(self):
        if pg.mouse.get_pressed()[0]:
            x, y = pg.mouse.get_pos()
            x = int(x // self.scl)
            y = int(y // self.scl)
            pg.draw.rect(self.screen, 0, (x * self.scl, y * self.scl, self.scl, self.scl))
            self.grid[y][x] = 255
        for c in range(self.cols):
            for r in range(self.rows):
                if self.grid[r][c] == 255:
                    pg.draw.rect(self.screen, 0,
                                 (c * self.scl, r * self.scl, self.scl, self.scl))

    def reset(self):
        self.__init__(self.W, self.H)

    def run(self):
        while self.running:
            for event in pg.event.get():
                if event.type == pg.QUIT:
                    self.running = False
                if event.type == pg.KEYDOWN:
                    if event.key == pg.K_RETURN:
                        self.img = self.ip.process_image(np.array(self.grid))
                        out = self.model(torch.tensor(self.img)).detach().numpy()
                        print(f'Prediction: {np.argmax(out)}')
                        self.reset()
            self.screen.fill((255, 255, 255))
            self.draw_grid()
            self.draw_node()
            pg.display.flip()
def __init__(self, ref_frames_data_filename, ref_pickle_filename, test_pickle_filename):
    print("init...")
    self.previous_obs = None
    self.image_processor = ImageProcessor()
    self.descriptors_ref = self.image_processor.load_sift(ref_pickle_filename)
    self.descriptors_test = self.image_processor.load_sift(test_pickle_filename)
    hmm = self.ref_frames_data_to_hmm(ref_frames_data_filename)
    # emission_probabilities = map(lambda x: complementary_normal_distribution_cdf(x, 0, EMISSION_SIGMA),
    #                              range(0, int(3.0 * EMISSION_SIGMA)))
    priors = dict([(state, 1.0 / len(hmm)) for state in hmm])
    self.viterbi = Viterbi(hmm,
                           self.emission_probability,
                           # BE CAREFUL with this: walking may take a long time and a higher value may be needed here
                           constraint_length=2500,
                           priors=priors)
def __init__(self, main_window):
    self.main_window = main_window
    self.files_loader = FilesLoader(self.main_window)
    self.rgb_thr_processor = RGBThreshProcessor(self.main_window)
    self.image_processor = ImageProcessor(self.main_window, self)
class RecordStore():
    def __init__(self, fileScanner):
        self.fileScanner = fileScanner
        self.imageProcessor = ImageProcessor()

    def process(self, aid, relative_folder, orderid):
        filenames = self.fileScanner.scan(relative_folder)
        logger.info('total files:%d' % len(filenames))
        # TODO how to handle exception? cache single step? or let all break?
        counter = 0
        for f in filenames:
            iid = cmsService.create_img(self.compose_image(relative_folder, f, aid))
            cmsService.add_order_img(iid, orderid)
            # zoom: generate the thumbnail, medium and large renditions
            relative_folder_file = relative_folder + '/' + f
            thumb_relative = self.imageProcessor.thumbnail(relative_folder_file)
            medium_relative = self.imageProcessor.medium(relative_folder_file)
            large_relative = self.imageProcessor.large(relative_folder_file)
            img_store = config.img_store
            if img_store == 'oss':
                # upload_file_to_oss(raw_relative_dir + "/" + imgname, (local_tmp_path_pattern % (raw_full_store_dir, imgname)))
                upload_file_to_oss('img' + large_relative, config.img_save_path + large_relative)
                upload_file_to_oss('img' + medium_relative, config.img_save_path + medium_relative)
                upload_file_to_oss('img' + thumb_relative, config.img_save_path + thumb_relative)
            counter += 1
        cmsService.update_order_img_counter(orderid, counter)
        logger.info('------------------------------done for(%s,%d:%d)---------------------------'
                    % (relative_folder, len(filenames), counter))
        return counter

    def compose_image(self, relative_path, title, aid):
        image = Image(title=title)
        image.file = relative_path + '/' + title
        image.aid = aid
        image.itype = Image.IMG_TYPE_USER
        return image
def get_start_points(image):
    window = cv2.namedWindow(MAZE_NAME, cv2.WINDOW_NORMAL)
    cv2.resizeWindow(MAZE_NAME, 900, 900)
    cv2.imshow(MAZE_NAME, image)
    cv2.moveWindow(MAZE_NAME, 100, 100)
    imageProcessor = ImageProcessor(image)
    start_x, start_y = imageProcessor.getDefaultStart(image)
    end_x, end_y = imageProcessor.getDefaultEnd(image)
    print("Press 'A' to use the default start and end points, or press 'S' to choose your own")
    key = cv2.waitKey(2000)
    print(key)
    if key == ord('a') or key == -1:
        print("Using Default Start and End Points")
        imageProcessor = ImageProcessor(image)
        start_x, start_y = imageProcessor.getDefaultStart(image)
        end_x, end_y = imageProcessor.getDefaultEnd(image)
        print("Start Point: {0}, End Point: {1}".format((start_x, start_y), (end_x, end_y)))
    elif key == ord('s'):
        print("Please select a start point")
        start_x, start_y = get_user_selected_point(image)
        print("Start Point: {0}, please select an end point".format((start_x, start_y)))
        end_x, end_y = get_user_selected_point(image)
        print("End Point: {0}".format((end_x, end_y)))
    cv2.destroyAllWindows()
    return start_x, start_y, end_x, end_y
def stage_find(self, should_run=None):
    """
    The arm is actively using the end-effector camera to find objects of the correct size.

    :param should_run: a `threading.Event` that tells the stage to continue if `is_set()` is True
    :return: a dict containing this stage's results
    """
    log.info("[stage_find] _begin_")
    r = dict()
    ip = ImageProcessor(res_width=MAX_IMAGE_WIDTH, res_height=MAX_IMAGE_HEIGHT)
    ip.capture_frame()
    log.info('[stage_find] max_pixel_count is:{0}'.format(ip.max_pixel_count))
    # check to see if the image processor found an object that is larger
    # than the minimum object size we want to try to pickup
    if ip.max_pixel_count > MIN_OBJECT_SIZE:
        print('largest object is:{0}'.format(ip.largest_object_id))
        print('largest object X coord is:{0}'.format(ip.largest_X))
        print('largest object Y coord is:{0}'.format(ip.largest_Y))
        r['x'] = ip.largest_X
        r['y'] = ip.largest_Y
        r['filename'] = ip.filename
        log.info("[stage_find] found object at x:{0} y:{1}".format(r['x'], r['y']))
        ip.close()
        log.info("[stage_find] _end_")
        return r
    else:
        # did not find an object larger than the minimum size - return NO_BOX_FOUND
        log.info("[stage_find] no object larger than:{0}".format(MIN_OBJECT_SIZE))
        ip.close()
        log.info("[stage_find] _end_")
        return NO_BOX_FOUND
from __future__ import absolute_import, division, print_function, unicode_literals
from PIL import Image
import os
import numpy as np
from image_processor import ImageProcessor

result_dict = {
    './pictures/border':   '1,0,0,0,0,',
    './pictures/flat_cap': '0,1,0,0,0,',
    './pictures/labrador': '0,0,1,0,0,',
    './pictures/top_hat':  '0,0,0,1,0,',
    './pictures/whippet':  '0,0,0,0,1,',
}

ip = ImageProcessor()
with open('Data/learndb.csv', 'w') as db:
    for dname, dnames, fnames in os.walk('./pictures'):
        if dname != './pictures' and len(fnames) > 0 and '__' not in dname:
            for i, f in enumerate(fnames):
                hogs = ip.get_hogs(np.array(Image.open(os.path.join(dname, f))), 200)
                for hog in hogs:
                    db.write(result_dict[dname])
                    db.write(','.join(['0' if -0.001 < k < 0.001 else '{:3.3f}'.format(k) for k in hog]))
                    db.write(chr(10))
                print(dname, i)
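# A hedged companion sketch showing how the learndb.csv written above could be read back:
# each row is a 5-value one-hot class prefix followed by the formatted HOG values. The
# load_learndb helper is hypothetical and not part of the original script.
import numpy as np


def load_learndb(path='Data/learndb.csv'):
    labels, features = [], []
    with open(path) as db:
        for line in db:
            parts = line.strip().split(',')
            labels.append([int(v) for v in parts[:5]])            # one-hot class prefix
            features.append([float(v) for v in parts[5:] if v])   # HOG feature values
    return np.array(features), np.array(labels)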
def process_files(self):
    print('processing files...')
    img_p = ImageProcessor([f[1] for f in self.files])
    self.processed = img_p.process_images()
    print('...done')
class Main(BoxLayout):
    """Instance is a controller for the primary application.

    This controller manages all of the buttons and text fields of the
    application. It instantiates ImageProcessor (the student defined class),
    and uses that sub-controller to process images.

    The View for this controller is defined in imager.kv."""
    # These fields are 'hooks' to connect to the imager.kv file
    # They work sort of like @properties, but are different
    source = StringProperty('samples/goldhill.jpg')
    original_image = ObjectProperty(None)
    current_image = ObjectProperty(None)
    grayscale = ObjectProperty(None)
    hidden_text = ObjectProperty(None)
    text_input = ObjectProperty(None)
    image_processor = ObjectProperty(None)
    notifier = ObjectProperty(None)

    # Hidden fields not needed by imager.kv
    _operand = None  # current executing option
    _op_args = None  # arguments for the executing option

    def config(self):
        """Configures the application at start-up.

        Controllers are responsible for initializing the application and
        creating all of the other objects. This method does just that. It
        loads the currently selected image file, creates an ImageArray for
        that file, creates an ImageProcessor to handle the array, and connects
        the ImageProcessor to the two ImagePanel objects."""
        # Load the image into an array
        image_array = ImageArray.LoadFile(self.source)
        # Create the processor for the given ImageArray
        self.image_processor = ImageProcessor(image_array)
        # Set up the display panels
        self.original_image_panel = ImagePanel(self.original_image, self.image_processor.original)
        self.current_image_panel = ImagePanel(self.current_image, self.image_processor.current)

    def error(self, msg):
        """Report an error to the user

        :param msg: the error message
        **Precondition**: a string

        The error message will take up most of the Window, and last until
        the user dismisses it."""
        assert type(msg) == str, repr(msg) + ' is not a string'
        content = ErrorDialog(label=msg, ok=self._dismiss_popup)
        self._popup = Popup(title='Error', content=content, size_hint=(0.9, 0.9))
        self._popup.open()

    def do(self, trans, *args):
        """Perform a transformation on the image.

        :param trans: transformation method in ImageProcessor
        **Precondition**: a reference to a method or function, not a string for its name

        :param args: list of arguments for `transform`
        **Precondition**: a list or tuple with valid argument values

        This method does not enforce its preconditions. Use with care."""
        if self._operand is not None:
            return
        # Say PROCESSING...
        self.notifier.color = [1, 1, 1, 1]
        self._operand = trans
        self._op_args = args
        # Process the transform on the next clock cycle.
        Clock.schedule_once(self._do_async)

    def _do_async(self, dt):
        """Perform the active image transform.

        Hidden method that allows us to spread a transformation over two clock
        cycles. This allows us to print a progress message on the screen."""
        # Perform the transformation
        if len(self._op_args) == 0:
            self._operand()
        else:
            self._operand(self._op_args[0])
        # Remove the status message and redisplay
        self.notifier.color = [0, 0, 0, 0]
        self.current_image_panel.display(self.image_processor.current)
        self._operand = None
        self._op_args = None

    def hide(self):
        """Stores the hidden message in the image via steganography.

        Calls the method from image_processor. Displays a pop-up if the method
        fails (i.e. returns False). Otherwise, the message is now stored in the
        image."""
        text = str(self.hidden_text.text)
        result = self.image_processor.hide(text)
        if not result:
            self.error('Nothing was hidden')

    def reveal(self):
        """Reveal the hidden message in the image.

        Calls the method from image_processor. Displays a pop-up if there is no
        message. Otherwise, places the message in the text input box."""
        self.hidden_text.text = ''
        text = self.image_processor.reveal()
        if text is None:
            self.error('No hidden message, apparently')
        else:
            self.hidden_text.text = '<message revealed:> ' + text

    def _dismiss_popup(self):
        """Used to dismiss the currently active pop-up"""
        self._popup.dismiss()

    def load(self):
        """Open a dialog to load an image file."""
        content = LoadDialog(load=self._load_helper, cancel=self._dismiss_popup)
        self._popup = Popup(title="Load image", content=content, size_hint=(0.9, 0.9))
        self._popup.open()

    def _load_helper(self, path, filename):
        """Callback function for load. Called when the user selects a file.

        This method loads the image file and redisplays the ImagePanels.
        Hidden method used only internally. No preconditions enforced."""
        self._dismiss_popup()
        if len(filename) == 0:
            return
        self.source = str(os.path.join(path, filename[0]))
        self.config()
        self.original_image_panel.display()
        self.current_image_panel.display()

    def save(self):
        """Save the image in the current ImageArray to a file."""
        content = SaveDialog(save=self._check_png, cancel=self._dismiss_popup)
        self._popup = Popup(title="Save image", content=content, size_hint=(0.9, 0.9))
        self._popup.open()

    def _check_png(self, path, filename):
        """Make sure we are saving in .png format.

        If the user uses another extension, or no extension at all, force the
        file to be a .png

        Hidden method used only internally. No preconditions enforced."""
        self._dismiss_popup()
        if filename.lower().endswith('.png'):
            self._save_png(filename)
        else:
            i = filename.rfind('.')
            if i != -1:
                filename = filename[:i]  # strip old extension
            filename += '.png'
            msg = 'File will be saved as\n{}\nin .png format. Proceed?'
            self._file_warning(msg.format(filename), filename, self._save_png)

    def _save_png(self, filename):
        """Check whether the file exists before saving.

        Saves the file if it does not exist or the user confirms.
        Hidden method used only internally. No preconditions except png suffix enforced."""
        assert filename.lower().endswith('.png')
        self._dismiss_popup()
        if os.path.isfile(filename):
            msg = 'File\n{}\nexists. Overwrite?'
            self._file_warning(msg.format(filename), filename, self._force_save)
        else:
            self._force_save(filename)

    def _force_save(self, filename):
        """Forcibly saves the specified file, without user confirmation.

        Hidden method used only internally. No preconditions enforced."""
        self._dismiss_popup()
        # prepare image for saving
        im = self.image_processor.current.image
        # Direct file descriptor save broken on Windows
        # with open(filename, 'w') as f:
        try:
            im.save(filename, 'PNG')
        except:
            self.error('Cannot save image file: ' + filename)

    def _file_warning(self, msg, filename, ok):
        """Alerts the user of an issue when trying to load or save a file

        Hidden method used only internally. No preconditions enforced."""
        content = WarningDialog(label=msg, data=filename, ok=ok, cancel=self._dismiss_popup)
        self._popup = Popup(title='Warning', content=content, size_hint=(0.9, 0.9))
        self._popup.open()

    def loadText(self):
        """Open a dialog to load a text file.

        Hidden method to try loading large messages into the text field.
        Used for grading purposes on hide/reveal, as the clipboard does not
        work on all OSs"""
        content = LoadDialog(load=self._load_text_helper, cancel=self._dismiss_popup)
        content.filechooser.filters = ['*.txt', '*.py']
        self._popup = Popup(title="Load text", content=content, size_hint=(0.9, 0.9))
        self._popup.open()

    def _load_text_helper(self, path, filename):
        """Callback function for _load_text. Called when the user selects a file.

        This method loads the text file and puts it in the text input box.
        Hidden method used only internally. No preconditions enforced."""
        self._dismiss_popup()
        if len(filename) == 0:
            return
        filename = str(os.path.join(path, filename[0]))
        instream = open(filename)
        self.hidden_text.text = instream.read()
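# The hide/reveal handlers above delegate to the student-defined ImageProcessor, whose
# steganography scheme is not shown. Below is only one common approach (least-significant-bit
# encoding over a flat uint8 pixel array) sketched for illustration; hide_message and
# reveal_message are hypothetical names and the real class may work differently.
import numpy as np


def hide_message(pixels, text):
    """Pack a 4-byte length prefix and the UTF-8 message into the low bits of a flat
    uint8 array. Returns the modified copy, or False when the array is too small
    (mirroring the False return checked by hide() above)."""
    data = text.encode('utf-8')
    bits = np.unpackbits(np.frombuffer(len(data).to_bytes(4, 'big') + data, dtype=np.uint8))
    if bits.size > pixels.size:
        return False
    out = pixels.copy()
    out[:bits.size] = (out[:bits.size] & 0xFE) | bits
    return out


def reveal_message(pixels):
    """Read the length prefix from the first 32 low bits, then decode that many bytes."""
    length = int.from_bytes(np.packbits(pixels[:32] & 1).tobytes(), 'big')
    bits = pixels[32:32 + length * 8] & 1
    return np.packbits(bits).tobytes().decode('utf-8')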
from __future__ import absolute_import, division, print_function, unicode_literals
from perceptron import MultiLayerPerceptron
from image_processor import ImageProcessor
from PIL import Image
import pi3d
import numpy as np
import random
import os

mlp = MultiLayerPerceptron(2048, 64, 5, wi_file='wi_file.npy', wo_file='wo_file.npy')
ip = ImageProcessor()

flist = []
for dname, dnames, fnames in os.walk('./pictures'):
    for f in fnames:
        if 'JPG' in f.upper():
            flist.append(os.path.join(dname, f))


def false_colour(a):
    X = [ 0.0,  0.25,   0.5,  0.75,  0.99,   1.0]
    R = [ 0.0,   0.0, 255.0, 255.0, 255.0, 255.0]
    G = [ 0.0,   0.0,  32.0, 150.0, 200.0, 255.0]
    B = [64.0, 120.0,  32.0,  32.0,  32.0,   0.0]
    #### 64.0 120.0 319.0 437.0 487.0 510.0
    new_a = np.zeros(a.shape[:2] + (3,), dtype=float)
    new_a[:, :, 0] = np.interp(a, X, R)
    new_a[:, :, 1] = np.interp(a, X, G)
    new_a[:, :, 2] = np.interp(a, X, B)
    return new_a.astype(np.uint8)
class Controller(object):
    def __init__(self):
        dispatcher.connect(self.handle_image_load_event, signal='load_image', sender=dispatcher.Any)
        dispatcher.connect(self.handle_filter_event, signal='apply_filter', sender=dispatcher.Any)
        self.image = None
        self.im_processor = ImageProcessor()
        self.user_interface = GUI()
        self.user_interface.run()

    def handle_filter_event(self, sender, args):
        filter_name, params = args
        if self.image is not None:
            if filter_name == 'Salt & pepper':
                self.image = self.im_processor.salt_and_pepper(self.image, params[0])
            elif filter_name == 'Median':
                self.image = self.im_processor.median(self.image, int(params[0]))
            elif filter_name == 'Average':
                self.image = self.im_processor.average(self.image, int(params[0]))
            elif filter_name == 'Binarization':
                self.image = self.im_processor.binarization(self.image)
            elif filter_name == 'Color detection':
                self.image = self.im_processor.color_detection(self.image, params[0], int(params[1]))
            elif filter_name == 'Complement':
                self.image = self.im_processor.complement(self.image)
            elif filter_name == 'Diagonal lines':
                self.image = self.im_processor.diagonal(self.image, params[0])
            elif filter_name == 'Difference':
                self.image = self.im_processor.difference(self.image, cv2.imread(params[0]))
            elif filter_name == 'High pass':
                self.image = self.im_processor.high_pass(self.image, int(params[0]))
            elif filter_name == 'Horizontal':
                self.image = self.im_processor.horizontal(self.image)
            elif filter_name == 'Hough':
                if params[0] == 'Line':
                    self.image = self.im_processor.hough_lines(self.image)
                else:
                    self.image = self.im_processor.hough_circles(self.image)
            elif filter_name == 'Intersection':
                self.image = self.im_processor.intersection(self.image, cv2.imread(params[0]))
            elif filter_name == 'Prewitt':
                self.image = self.im_processor.prewitt(self.image)
            elif filter_name == 'Roberts':
                self.image = self.im_processor.roberts(self.image)
            elif filter_name == 'Seam carving':
                self.image = self.im_processor.seam_carving(self.image, int(params[0]))
            elif filter_name == 'Sobel':
                self.image = self.im_processor.sobel(self.image)
            elif filter_name == 'Union':
                self.image = self.im_processor.union(self.image, cv2.imread(params[0]))
            colormap = self.im_processor.convert_bgr_to_rgb(self.image)
            self.user_interface.append_image(self.image, colormap, filter_name)
        else:
            print('There is no image set to apply this filter')

    def handle_image_load_event(self, sender, image_path):
        self.image = cv2.imread(image_path)
        print('the image was set')
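# The dispatch above routes filter names to ImageProcessor methods that are not shown here.
# As an illustration of one of them, this is a hedged stand-in for salt_and_pepper with the
# same (image, amount) call shape used above; the real method's behaviour may differ.
import numpy as np


def salt_and_pepper(image, amount):
    """Set a random fraction `amount` of pixels to black (pepper) or white (salt)."""
    out = image.copy()
    noise = np.random.rand(*out.shape[:2])
    out[noise < amount / 2] = 0        # pepper
    out[noise > 1 - amount / 2] = 255  # salt
    return out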
class MasterController(object):
    box_pose = {}
    box_pose["PURPLE"] = Pose(
        position=Point(x=0.31, y=0.1445, z=0.1),
        orientation=Quaternion(x=0, y=math.pi / 4, z=0, w=0)
    )
    box_pose["ORANGE"] = Pose(
        position=Point(x=0.31, y=0.1445 - 36 * 0.0254, z=0.1),
        orientation=Quaternion(x=0, y=math.pi / 4, z=0, w=0)
    )
    box_pose["GREEN"] = Pose(
        position=Point(x=0.31, y=0.1445 - 27 * 0.0254, z=0.1),
        orientation=Quaternion(x=0, y=math.pi / 4, z=0, w=0)
    )

    def __init__(self, setconfig=False):
        rospy.init_node("senior_design")
        self.right_camera = ImageReceiver("right_hand_camera")
        self.left_camera = ImageReceiver("left_hand_camera")
        self.head_camera = ImageReceiver("head_camera")
        self.left_camera.disableCamera()
        self.head_camera.disableCamera()
        self.right_camera.enableCamera()
        distortion, camera_matrix = self.right_camera.getIntrinsics()
        self.move = MoveController("right")
        self.image_processor = ImageProcessor(self.move.home_pose, camera_matrix, distortion)
        self.block_list = {"PURPLE": [], "ORANGE": [], "GREEN": []}
        # This is all for loading / calculating / saving the configuration
        # information, i.e. box location and table height
        if setconfig:
            self.move.update_table_height()
            f = open("config.txt", "w")
            outStr = "TABLE_HEIGHT:" + str(self.move.table_height) + "\n"
            f.write(outStr)
            self.find_blocks()
            blackoutval = -100
            for color in self.block_list:
                maxxvalue = -100
                for block in self.block_list[color]:
                    if block.pose.position.x > maxxvalue:
                        maxxvalue = block.pose.position.x
                        self.box_pose[color] = block.pose
                    if maxxvalue > blackoutval:
                        blackoutval = maxxvalue
                        blackoutblock = block
                if maxxvalue == -100:
                    print("ERROR ALL THREE COLORS NOT DETECTED MISSING " + color + " BLOCK")
                    sys.exit()
                else:
                    outStr = (
                        color + ":" + str(self.box_pose[color].position.x - 0.08) + ","
                        + str(self.box_pose[color].position.y) + "\n"
                    )
                    f.write(outStr)
            coord = blackoutblock.coord
            self.image_processor.blackout = min(coord[0][1], coord[2][1])
            outStr = "BLACKOUT:" + str(self.image_processor.blackout) + "\n"
            f.write(outStr)
            f.close()
        else:
            # Load config file
            try:
                f = open("config.txt", "r")
                newLine = f.readline()
                while newLine != "":
                    newLine = newLine.split(":")
                    if len(newLine) == 2:
                        if newLine[0] == "TABLE_HEIGHT":
                            self.move.table_height = float(newLine[1])
                            self.image_processor.table_height = float(newLine[1])
                            self.update_home_pose(self.move.home_pose)
                        if newLine[0] == "BLACKOUT":
                            print(int(newLine[1]))
                            self.image_processor.blackout = int(newLine[1])
                        if self.box_pose.get(newLine[0], None) != None:
                            coords = newLine[1].split(",")
                            if len(coords) == 2:
                                self.box_pose[newLine[0]].position.x = float(coords[0])
                                self.box_pose[newLine[0]].position.y = float(coords[1])
                            else:
                                print("INVALID CONFIG FILE SHOULD BE COLOR:X,Y" + str(newLine))
                                sys.exit()
                    newLine = f.readline()
                f.close()
            except:
                print("MISSING config.txt PLEASE RUN WITH -c FLAG")
                print("PROCEEDING WITH DEFAULT TABLE HEIGHT AND NO IMAGE BLACKOUT VALUE")

    def update_home_pose(self, pose):
        self.image_processor.update_home_pose(pose)
        self.move.home_pose = pose

    def get_home_image(self):
        self.move.move_to_home()
        rospy.sleep(0.1)
        image = self.right_camera.getImage()
        self.image_processor.setImage(image)

    def find_blocks(self):
        self.get_home_image()
        self.block_list["PURPLE"] = self.image_processor.findBlock("PURPLE")
        # print("Found " + str(len(self.block_list['PURPLE'])) + " purple blocks")
        self.block_list["ORANGE"] = self.image_processor.findBlock("ORANGE")
        # print("Found " + str(len(self.block_list['ORANGE'])) + " orange blocks")
        self.block_list["GREEN"] = self.image_processor.findBlock("GREEN")
        # print("Found " + str(len(self.block_list['GREEN'])) + " green blocks")

    def are_blocks_near(self, block):
        xthresh = 0
        ythresh = 0.074  # ~3 inches
        xpose = block.pose.position.x
        ypose = block.pose.position.y
        isclose = False
        # print("Current block:________")      # debug code
        # print(block.pose.position)           # debug code
        # check for close blocks of any colour
        for color in self.block_list:
            for block2 in self.block_list[color]:
                # print("Block at:")           # debug code
                # print(block2.pose.position)  # debug code
                if (xpose != block2.pose.position.x) and (ypose != block2.pose.position.y):
                    if math.fabs(xpose - block2.pose.position.x) < xthresh:
                        print("Block at ")
                        print(block.pose)
                        print("is close to another block in the x direction.")
                        isclose = True
                    if math.fabs(ypose - block2.pose.position.y) < ythresh:
                        print("Block at ")
                        print(block.pose)
                        print("is close to another block in the y direction.")
                        isclose = True
        return isclose

    def get_blocks(self, trials=2, miss_per_trial=2):
        result = 0
        for i in range(trials):
            self.find_blocks()
            num_missed = 0
            for color in self.block_list:
                for block in self.block_list[color]:
                    result = self.move.pick_at_pose(block.pose)
                    # result == 0 means the block pose was valid; if it wasn't, go to the next block
                    if result == 0:
                        self.move.raise_up(block.pose)
                        if not self.move.gripper.gripping():
                            self.move.gripper.open(block=False)
                            num_missed += 1
                            if num_missed == miss_per_trial:
                                break
                        else:
                            self.move.drop_at_pose(self.box_pose[color])
                if num_missed == miss_per_trial:
                    # want to break two loops when the gripper doesn't grip
                    break
            if num_missed == 0:
                # Would mean that all blocks were found in the picture
                return num_missed
        return num_missed