def predict(image_path, checkpoint, topk, cat_names, gpu):
    start_time = time()
    device = torch.device(gpu if torch.cuda.is_available() else "cpu")
    model = load_model(None, checkpoint, None).double()

    im = Image.open(image_path)
    processed_image = process_image(im)
    test_image = processed_image.unsqueeze(0)

    # predict image
    with torch.set_grad_enabled(False):
        test_image = test_image.to(device)
        logps = model.forward(test_image)
        ps = torch.exp(logps)
        top_p, top_classes = ps.topk(topk, dim=1)

    # invert the class_to_idx mapping
    idx_to_class = dict()
    for key, value in model.class_to_idx.items():
        idx_to_class[value] = key

    # print results; cat_names maps class labels to human-readable names
    for p, c in zip(top_p[0].tolist(), top_classes[0].tolist()):
        print(f"class name: {cat_names[idx_to_class[c]]}"
              f"\t with probability of: {p * 100: .3f}%")

    end_time = time()
    tot_time = end_time - start_time
    print("\n** Total Elapsed Runtime:",
          str(int(tot_time / 3600)) + ":"
          + str(int((tot_time % 3600) / 60)) + ":"
          + str(int((tot_time % 3600) % 60)))
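# The elapsed-time string above can also be built with divmod; this is an
# equivalent formatting sketch, not part of the original code.
def format_elapsed(seconds):
    hours, rem = divmod(int(seconds), 3600)
    minutes, secs = divmod(rem, 60)
    return f"{hours}:{minutes}:{secs}"

# e.g. format_elapsed(3725) -> '1:2:5'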
def run():
    if arg.image != 'None':
        # process image
        image = process_image(arg.image)
        print('Converting ' + arg.image + ' image to ' + arg.convert)
        if arg.convert == 'cartoon':
            # load model
            G_XtoY = load_generator(g_conv_dims, n_res_blocks, 'G_XtoY')
            # set model to eval
            G_XtoY.eval()
            # apply model
            result = G_XtoY(image)
            # plot and save results
            show_results(image, result, arg.filenames)
        elif arg.convert == 'human':
            # load model
            G_YtoX = load_generator(g_conv_dims, n_res_blocks, 'G_YtoX')
            # set model to eval
            G_YtoX.eval()
            # apply model
            result = G_YtoX(image)
            # plot and save results
            show_results(image, result, arg.filenames)
        else:
            raise NotImplementedError(
                'Unknown convert {} parameter. Valid options are: human, cartoon'.format(arg.convert))
def extract_message(file_path, msg_length, offset=0, interleave=0, cipher=False):
    image = process_image(file_path, 100)
    raster = get_raster(image)
    values_raster = list(raster)
    bytes_offset = offset * 3
    bytes_interleave = interleave * 3
    bin_msg_length = msg_length * 8
    bin_message = ''
    empty_buffer = False

    for i in range(bytes_offset, len(raster), bytes_interleave + 3):
        if empty_buffer:
            break
        for j in range(i, i + 3):
            # take the least significant bit of each colour byte
            bin_character = _encode_bin(values_raster[j])
            bin_message += bin_character[-1]
            bin_msg_length -= 1
            if bin_msg_length == 0:
                empty_buffer = True
                break

    # if cipher:
    #     return rot13(_decode_bin(bin_message))
    return _decode_bin(bin_message)
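# A minimal sketch of what the _encode_bin / _decode_bin helpers used above might
# look like; their real implementations are not shown here, so this is an
# assumption: _encode_bin turns one byte value into an 8-character bit string,
# and _decode_bin turns a bit string back into text, 8 bits per character.
def _encode_bin(value):
    # e.g. 65 -> '01000001'
    return format(value, '08b')

def _decode_bin(bits):
    # e.g. '01000001' -> 'A'
    chars = [chr(int(bits[i:i + 8], 2)) for i in range(0, len(bits), 8)]
    return ''.join(chars)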
def predict(image_path, model, device, topk):
    """
    Predict the class (or classes) of an image using a trained deep learning model.

    Parameters:
    - image_path: path to the image for which we will predict the class(es)
    - model: the model to be used
    - device: the device to be used, gpu or cpu
    - topk: the number of K most likely classes we want to calculate/return

    Returns:
    - top_probs: the top probabilities
    - classes: the top classes
    """
    # set the model to inference mode
    model.eval()
    # move the model to the selected device
    model.to(device)

    # process the image and add a batch dimension
    image = hlp.process_image(image_path)
    image = np.expand_dims(image, 0)
    img_to_fwd = torch.from_numpy(image)
    img_to_fwd = img_to_fwd.to(device)

    # Turn off gradients to speed up this part
    with torch.no_grad():
        # fwd pass to get the logits
        output = model.forward(img_to_fwd)
        # Calculate the class probabilities (softmax) for img
        ps = F.softmax(output, dim=1)
        # get the top K largest values
        probs, classes = ps.topk(topk)

    # probs and classes are tensors, so we convert to lists so we return
    # as is required
    top_probs = probs.cpu().detach().numpy().tolist()[0]
    top_classes = classes.cpu().detach().numpy().tolist()[0]

    # I was getting the wrong class labels when converting;
    # the solution in the following helped me:
    # https://knowledge.udacity.com/questions/31597
    idx_to_class = {val: key for key, val in model.class_to_idx.items()}

    # convert the indices back to the original class labels
    classes = [idx_to_class[cls] for cls in top_classes]

    return top_probs, classes
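# A hypothetical usage sketch for the predict() above. The load_checkpoint helper,
# the checkpoint filename, and the cat_to_name.json mapping are assumptions for
# illustration, not part of the original code.
import json
import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = load_checkpoint("checkpoint.pth")  # assumed helper that rebuilds the trained model
top_probs, classes = predict("flowers/test/1/image_06743.jpg", model, device, topk=5)

with open("cat_to_name.json") as f:  # assumed class-label to name mapping
    cat_to_name = json.load(f)

for p, cls in zip(top_probs, classes):
    print(f"{cat_to_name.get(cls, cls)}: {p:.3f}")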
def apply_filter(image_name, color, intensity, reading_block):
    data = process_image(image_name, reading_block)
    header = get_header(data)
    if color == 'R':
        red_img(data, header, intensity)
    elif color == 'G':
        green_img(data, header, intensity)
    elif color == 'B':
        blue_img(data, header, intensity)
    elif color == 'BW':
        black_white(data, header, intensity)
def main():
    params = parse_arguments()
    path = params.get('carrier_path')
    image = process_image(path, 100)
    header = get_header(image)
    offset, interleave, l_total, cipher = (header.get('offset'),
                                           header.get('interleave'),
                                           header.get('l_total'),
                                           header.get('cipher'))
    print(extract_message(path, int(l_total), int(offset), int(interleave), cipher))
def save(self, *args, **kwargs):
    flag = not self.pk
    super(Kyc, self).save(*args, **kwargs)
    if flag:
        from helpers import process_image
        from django.conf import settings
        import os

        path = os.path.join(settings.MEDIA_ROOT, self.image.url)
        res = process_image(path)
        # hard-coded values currently override the process_image output
        res = {
            'name': 'Vinayak',
            'dob': '1991-08-29',
            'pan_no': '32412312'
        }
        self.__dict__.update(res)
        self.save()
    return self
def main():
    params = parse_arguments()
    block_size, path, message, offset, interleave, output, cipher = (
        int(params.get('block_size')),
        params.get('carrier_path'),
        params.get('message'),
        params.get('pixels_offset', 0),
        params.get('pixels_interleave', 0),
        params.get('output_file'),
        params.get('cipher'))

    validate_params(path, message, block_size, offset, interleave)

    image = process_image(path, block_size)
    header = get_header(image)
    raster = get_raster(image)
    message = get_message(message)
    raster = rgb_threads(raster, message, offset, interleave, cipher)
    write_image(header, raster, output, offset, interleave, message, cipher)

    finish = perf_counter()
    tt = round(finish - start, 3)  # `start` is expected to be set at module level
    print("Total time: ", tt)
def predict(image_path, model, topk, device):
    ''' Predict the class (or classes) of an image using a trained deep learning model.
    '''
    loaded_model = load_checkpoint(model)
    print(loaded_model)
    loaded_model.to(device)

    im = Image.open(image_path)
    image = helpers.process_image(im)
    image = torch.from_numpy(image).float().to(device)
    image.unsqueeze_(0)

    with torch.no_grad():
        log_ps = loaded_model.forward(image)

    ps = torch.exp(log_ps)
    top_p, top_class_idx = ps.topk(topk, dim=1)
    top_classes = [
        loaded_model.idx_to_class[idx]
        for idx in top_class_idx.cpu().data.numpy().squeeze()
    ]
    top_p = top_p.cpu().data.numpy().squeeze()
    return top_p, top_classes
def send_img(self, topic, img):
    i = process_image(img)
    i = base64_encode_image(i)
    self._produce(topic, i)
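# A minimal sketch of what base64_encode_image might do, assuming the processed
# image is a NumPy array; the real helper is not shown in the original code.
import base64
import numpy as np

def base64_encode_image(arr: np.ndarray) -> str:
    # flatten the array to raw bytes and base64-encode so it can travel as text
    return base64.b64encode(np.ascontiguousarray(arr).tobytes()).decode("utf-8")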