def get_recipe(image_bytes):
    """Generate four candidate recipes for a single image.

    Runs the module-level ``model`` four times (one greedy pass and three
    temperature-sampled passes) on the tensor built from *image_bytes* and
    decodes each output with ``prepare_output``.

    Args:
        image_bytes: raw image bytes; converted to a model input by
            ``get_tensor`` (defined elsewhere in this module).

    Returns:
        Tuple ``(titles, ingredients, recipes, ingr_lens, recipe_lens)`` --
        five parallel lists with one entry per generation pass.
    """
    titles = []
    ingredients = []
    recipes = []
    ingr_lens = []
    recipe_lens = []
    tensor = get_tensor(image_bytes)
    greedy = [True, False, False, False]
    beam = [-1, -1, -1, -1]  # -1 disables beam search for every pass
    temperature = 1.0
    numgens = len(greedy)

    # The vocabularies are loop-invariant: load them ONCE instead of
    # re-reading both pickle files on every generation pass, and use
    # context managers so the file handles are not leaked (the original
    # used pickle.load(open(...)) inside the loop).
    data_dir = 'M:/Final Project/code/inversecooking-master/data'
    with open(os.path.join(data_dir, 'ingr_vocab.pkl'), 'rb') as f:
        ingrs_vocab = pickle.load(f)
    with open(os.path.join(data_dir, 'instr_vocab.pkl'), 'rb') as f:
        vocab = pickle.load(f)

    for i in range(numgens):
        with torch.no_grad():
            outputs = model.sample(tensor, greedy=greedy[i],
                                   temperature=temperature, beam=beam[i],
                                   true_ingrs=None)
        ingr_ids = outputs['ingr_ids'].cpu().numpy()
        recipe_ids = outputs['recipe_ids'].cpu().numpy()
        outs, valid = prepare_output(recipe_ids[0], ingr_ids[0],
                                     ingrs_vocab, vocab)
        ingr_lens.append(len(outs['ingrs']))
        recipe_lens.append(len(outs['recipe']))
        titles.append(outs['title'])
        ingredients.append(outs['ingrs'])
        recipes.append(outs['recipe'])
    return titles, ingredients, recipes, ingr_lens, recipe_lens
def generate_recipe(image_tensor):
    """Sample recipes from the module-level ``model`` and collect valid ones.

    Relies on module-level globals: ``numgens``, ``greedy``, ``temperature``,
    ``beam``, ``model``, ``ingrs_vocab``, ``vocab`` and ``logger``.

    Args:
        image_tensor: preprocessed image batch accepted by ``model.sample``.

    Returns:
        List of decoded recipe dicts (``title``/``ingrs``/``recipe`` keys).
        Returns early once two valid recipes have been collected
        (``num_valid`` starts at 1 and the loop exits when it reaches 3).
    """
    num_valid = 1
    recipes = []
    for i in range(numgens):
        with torch.no_grad():
            outputs = model.sample(
                image_tensor,
                greedy=greedy[i],
                temperature=temperature,
                beam=beam[i],
                true_ingrs=None,
            )
        ingr_ids = outputs["ingr_ids"].cpu().numpy()
        recipe_ids = outputs["recipe_ids"].cpu().numpy()
        outs, valid = prepare_output(recipe_ids[0], ingr_ids[0],
                                     ingrs_vocab, vocab)
        if valid["is_valid"]:
            # Fixed typo in log message ("succesfully" -> "successfully").
            logger.warning("Recipe successfully generated!")
            num_valid += 1
            # outs['title'], outs['ingrs'], outs['recipe']
            logger.warning(outs["title"])
            logger.warning(outs["ingrs"])
            logger.warning(outs["recipe"])
            logger.warning("Generating recipe # {}".format(len(recipes) + 1))
            recipes.append(outs)
        else:
            # NOTE(review): despite the message, generation continues with the
            # next pass; only the num_valid cap below stops early.
            logger.error("Recipe not valid, stopping...")
        if num_valid == 3:
            return recipes
    return recipes
def getReceipe(img_file, receipeModel):
    """Generate a single recipe dict for an image file.

    Args:
        img_file: path to the input image.
        receipeModel: inverse-cooking model exposing ``.sample``.

    Returns:
        Decoded output dict from ``prepare_output`` for the first (greedy)
        generation pass, or ``""`` if no pass runs.

    Relies on module-level globals ``device`` and ``data_dir``.
    """
    # Batch-level transform: tensor conversion + ImageNet normalization.
    transf_list_batch = [
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406),
                             (0.229, 0.224, 0.225)),
    ]
    to_input_transf = transforms.Compose(transf_list_batch)

    #greedy = [True, False, False, False]
    # Only a single greedy pass is performed; the extra beam entries are
    # kept for parity with the multi-pass variant elsewhere in this module.
    greedy = [True]
    beam = [-1, -1, -1, -1]
    temperature = 1.0
    numgens = len(greedy)

    image = Image.open(img_file).convert('RGB')
    transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
    ])
    image_transf = transform(image)
    image_tensor = to_input_transf(image_transf).unsqueeze(0).to(device)

    # Loop-invariant vocabularies: load once, with context managers so the
    # file handles are closed (the original leaked them inside the loop).
    with open(os.path.join(data_dir, 'ingr_vocab.pkl'), 'rb') as f:
        ingrs_vocab = pickle.load(f)
    with open(os.path.join(data_dir, 'instr_vocab.pkl'), 'rb') as f:
        vocab = pickle.load(f)

    for i in range(numgens):
        with torch.no_grad():
            outputs = receipeModel.sample(image_tensor, greedy=greedy[i],
                                          temperature=temperature,
                                          beam=beam[i], true_ingrs=None)
        ingr_ids = outputs['ingr_ids'].cpu().numpy()
        recipe_ids = outputs['recipe_ids'].cpu().numpy()
        outs, valid = prepare_output(recipe_ids[0], ingr_ids[0],
                                     ingrs_vocab, vocab)
        print('-----------------')
        print(outs)
        print('-----------------')
        # Returns after the first pass (numgens == 1).
        return outs
    return ""
# Per-step inference over a sequence of images: for each time step, run the
# model on that step's frame and print the decoded instructions, ingredients
# and actions.
for steps in range(length_steps):
    # Slice out this time step from the batched inputs.
    # NOTE(review): assumes img_inputs is (batch, steps, C, H, W) -- confirm.
    image_step = img_inputs[:, steps, :, :, :]
    caption_step = captions[:, steps, :]
    ingrs_step = ingr_gt[:, steps, :]
    action_step = action_gt[:, steps, :]
    # Ground-truth caption without the leading start token.
    # NOTE(review): caption_step / ingrs_step / action_step / true_caps are
    # computed but unused in this visible loop body -- possibly leftovers.
    true_caps = caption_step.clone()[:, 1:].contiguous()
    #new_img_PIL = transforms.ToPILImage()(image_step.cpu().squeeze(0)).convert('RGB')
    #new_img_PIL.show()  # processed PIL image
    with torch.no_grad():
        outputs = model.sample(image_step, true_ingrs=None)
    ingr_ids = outputs['ingr_ids'].cpu().numpy()
    action_ids = outputs['action_ids'].cpu().numpy()
    recipe_ids = outputs['recipe_ids'].cpu().numpy()
    # Decode id sequences into human-readable text via the three vocabularies.
    outs, valid = prepare_output(recipe_ids[0], ingr_ids[0], action_ids[0],
                                 insts_vocab, ingrs_vocab, action_vocab)
    # ANSI escape codes for bold terminal output.
    BOLD = '\033[1m'
    END = '\033[0m'
    print(BOLD + '\nInstructions:' + END)
    print('-' + ' '.join(outs['recipe']))
    print(BOLD + '\nIngredients:' + END)
    print(', '.join(outs['ingrs']))
    print(BOLD + '\nActions:' + END)
    print(', '.join(outs['action']))
    print('=' * 20)
random.shuffle(demo_imgs) #demo_urls = ['https://food.fnr.sndimg.com/content/dam/images/food/fullset/2013/12/9/0/FNK_Cheesecake_s4x3.jpg.rend.hgtvcom.826.620.suffix/1387411272847.jpeg' demo_files =demo_imgs for img_file in demo_files: image_path = os.path.join(image_folder, img_file) image = Image.open(image_path).convert('RGB') transf_list = [] transf_list.append(transforms.Resize(256)) transf_list.append(transforms.CenterCrop(224)) transform = transforms.Compose(transf_list) image_transf = transform(image) image_tensor = to_input_transf(image_transf).unsqueeze(0).to(device) num_valid = 1 numgens = 1 for i in range(numgens): with torch.no_grad(): outputs = model.sample(image_tensor, greedy=greedy[i],temperature=temperature, beam=beam[i], true_ingrs=None) ingr_ids = outputs['ingr_ids'].cpu().numpy() recipe_ids = outputs['recipe_ids'].cpu().numpy() outs, valid = prepare_output(recipe_ids[0], ingr_ids[0], ingrs_vocab, vocab) if True or valid['is_valid']: print ('RECIPE', num_valid) print("Image name",img_file) num_valid+=1 BOLD = '\033[1m' END = '\033[0m' #print (BOLD + '\nTitle:' + END,outs['title']) print (BOLD + '\nIngredients:'+ END) print (', '.join(outs['ingrs'])) print("Hello")
def process_url(url, food_processor):
    """Fetch an image URL, generate a recipe, and store it under a random id.

    Args:
        url: image URL to fetch.
        food_processor: object bundling the model, transforms, device,
            vocabularies, per-ingredient CO2 scores (``ingr_co2``) and
            lower-CO2 alternatives (``ingr_alternatives``).

    Returns:
        ``{"id": <16-char id>}`` -- the key under which the result dict was
        stored in the module-level ``req_store`` and persisted to ``tmp_json``.

    On any failure a placeholder recipe is stored instead (best-effort API),
    so callers always receive an id.
    """
    try:
        # timeout added so a dead host cannot hang the request forever;
        # requests.Timeout is caught below and handled by the fallback recipe.
        response = requests.get(url, stream=True, timeout=30)
        image = Image.open(BytesIO(response.content))
        print("Got Image")
        image_transf = food_processor.transform(image)
        image_tensor = food_processor.to_input_transf(image_transf).unsqueeze(
            0).to(food_processor.device)
        print("Image Transformed")
        with torch.no_grad():
            outputs = food_processor.model.sample(image_tensor,
                                                  greedy=True,
                                                  temperature=1.0,
                                                  beam=-1,
                                                  true_ingrs=None)
        print("Got outputs")
        ingr_ids = outputs["ingr_ids"].cpu().numpy()
        recipe_ids = outputs["recipe_ids"].cpu().numpy()
        outs, valid = prepare_output(recipe_ids[0], ingr_ids[0],
                                     food_processor.ingrs_vocab,
                                     food_processor.vocab)
        is_valid = valid["is_valid"]
        title = outs["title"]
        recipes = outs["recipe"]
        ingrids = []      # (ingredient, co2) as generated
        new_ingrids = []  # (ingredient, co2) after low-CO2 substitution
        alt_ingrids = {}  # ingredient -> list of alternatives
        for ingr in outs["ingrs"]:
            ingrids.append((ingr, food_processor.ingr_co2[ingr]))
            if len(food_processor.ingr_alternatives[ingr]) > 0:
                alt_ingrids[ingr] = food_processor.ingr_alternatives[ingr]
                # print(ingr, ingr_alternatives[ingr])
                # Take the first (presumably best) alternative and substitute
                # it into the instruction text.
                new_ingr = food_processor.ingr_alternatives[ingr][0][0]
                new_ingrids.append(
                    (new_ingr, food_processor.ingr_co2[new_ingr]))
                # NOTE(review): plain substring replace -- may also rewrite
                # words that merely contain the ingredient name.
                for i, step_ in enumerate(recipes):
                    recipes[i] = step_.replace(ingr, new_ingr)
            else:
                new_ingrids.append((ingr, food_processor.ingr_co2[ingr]))
        # ingrids = outs["ingrs"]
    except Exception:
        # Best-effort fallback: serve a placeholder recipe instead of failing.
        is_valid = False
        title = "Awesome Recipe"
        ingrids = [("Potatoes", 1.0), ("And more Potatoes", 1.0)]
        alt_ingrids = {"Potatoes": [("Carrott", 0.5)]}
        new_ingrids = [("Carrot", 0.5), ("And more Potatoes", 1.0)]
        recipes = ["Eat", "Sleep", "Train", "Repeat"]
        # Bug fix: print_stack() shows the *current* call stack, not the
        # exception; print_exc() logs the traceback that actually fired.
        traceback.print_exc()
    ret_dict = {
        "title": title,
        "ingredients": ingrids,
        "new": new_ingrids,
        "alternatives": alt_ingrids,
        "instructions": recipes,
        "is_valid": is_valid,
        "url": url,
    }
    id_ = get_random_string(16)
    req_store[id_] = ret_dict
    with open(tmp_json, "w") as fp_:
        json.dump(req_store, fp_)
    return {"id": id_}
def main(dir_file, image_folder, demo_path, lights):
    """Load the inverse-cooking model, generate a recipe for one image, and
    return it together with a recommended similar recipe.

    Args:
        dir_file: directory holding the vocab pickle and recommendation data.
        demo_path: image filename inside image_folder (or a URL when
            use_urls is flipped on below).
        image_folder: folder containing local demo images.
        lights: nutrition traffic-light filter passed through to search().

    Returns:
        Tuple (title, recipe, recommend_lights, recommend_title,
        recommend_url).
    """
    use_gpu = True
    device = torch.device(
        'cuda' if torch.cuda.is_available() and use_gpu else 'cpu')
    # When running on CPU, remap checkpoint tensors that were saved on GPU.
    map_loc = None if torch.cuda.is_available() and use_gpu else 'cpu'
    ingrs_vocab = pickle.load(
        open(os.path.join(dir_file, 'recipe1m_vocab_unit.pkl'), 'rb'))
    ingr_vocab_size = len(ingrs_vocab)
    t = time.time()
    # HACK: get_parser() reads sys.argv; blank it out so stray CLI arguments
    # (e.g. from a notebook kernel) do not break argument parsing.
    import sys
    sys.argv = ['']
    del sys
    args = get_parser()
    args.maxseqlen = 15
    args.ingrs_only = False
    model = get_model(args, ingr_vocab_size)
    # Load the trained model parameters
    model_dir = '/home/r8v10/git/InvCo/dataset/new_model/inversecooking/model/checkpoints'
    # model_dir = F'{ROOT_DIR}/dataset/model/inversecooking/model/checkpoints'
    model_path = os.path.join(model_dir, 'modelbest.ckpt')
    model.load_state_dict(torch.load(model_path, map_location=map_loc))
    model.to(device)
    model.eval()
    model.ingrs_only = False
    model.recipe_only = False
    print('loaded model')
    print("Elapsed time:", time.time() - t)
    # Batch-level transform: tensor conversion + ImageNet normalization,
    # applied after the resize/crop transform built further down.
    transf_list_batch = []
    transf_list_batch.append(transforms.ToTensor())
    transf_list_batch.append(
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)))
    to_input_transf = transforms.Compose(transf_list_batch)
    # set to true to load images from demo_urls instead of those in test_imgs folder
    use_urls = False
    #if True, it will show the recipe even if it's not valid
    show_anyways = True
    if use_urls:
        response = requests.get(demo_path)
        image = Image.open(BytesIO(response.content))
    else:
        image_path = os.path.join(image_folder, demo_path)
        image = Image.open(image_path).convert('RGB')
    # print('Data path:', image_path)
    transf_list = []
    transf_list.append(transforms.Resize(256))
    transf_list.append(transforms.CenterCrop(224))
    transform = transforms.Compose(transf_list)
    image_transf = transform(image)
    image_tensor = to_input_transf(image_transf).unsqueeze(0).to(device)
    num_valid = 1
    temperature = 1.0
    # greedy = [True, False, False, False]
    # beam = [-1, -1, -1, -1]
    # Resample (non-greedy, so each pass can differ) until a fully valid
    # recipe is produced.
    # NOTE(review): if valid['reason'] never becomes 'All ok.' this loops
    # forever -- consider a retry cap.
    while True:
        with torch.no_grad():
            outputs = model.sample(image_tensor, greedy=False,
                                   temperature=temperature, beam=-1,
                                   true_ingrs=None)
        recipe_ids = outputs['recipe_ids'].cpu().numpy()
        outs, valid = prepare_output(recipe_ids[0], ingrs_vocab)
        num_valid += 1
        if valid['is_valid'] or show_anyways:
            if valid['reason'] == 'All ok.':
                # print ('RECIPE', num_valid)
                # BOLD = '\033[1m'
                # END = '\033[0m'
                # print (BOLD + '\nTitle:' + END,outs['title'])
                # print (BOLD + '\nInstructions:'+END)
                # print ('-'+'\n-'.join(outs['recipe']))
                # print ('='*20)
                #print ("Reason: ", valid['reason'])
                break
    # Recommend a related recipe based on the generated instruction text.
    recommend_id, recommend_lights = search(dir_file, outs['recipe'], lights)
    recommend_title, recommend_url = get_recipe(dir_file, recommend_id)
    # print('Recommendation of recipe:', recommend_id)
    # print('Title:', recommend_title)
    # print('Lights:', recommend_lights)
    # print('url:', recommend_url)
    return outs['title'], outs[
        'recipe'], recommend_lights, recommend_title, recommend_url