def main(): """Visual Qustion Answering Using Machine Learning and Streamlit """ __C = Cfgs() args = parse_args() args_dict = __C.parse_to_dict(args) cfg_file = "cfgs/{}_model.yml".format(args.MODEL) with open(cfg_file, 'r') as f: yaml_dict = yaml.load(f) args_dict = {**yaml_dict, **args_dict} __C.add_args(args_dict) __C.proc() print('Hyper Parameters:') print(__C) st.title("VQA tool using Streamlit") html_temp = """ <div style="background-color:tomato;padding:10px"> <h2 style="color:white;text-align:center;">ViPyKube ML App </h2> </div> """ st.markdown(html_temp, unsafe_allow_html=True) images = [] st.sidebar.title("Image selection and Question") button = st.sidebar.radio( 'Randomly generate images', ('With predefined questons', 'With custom question')) #while True: img_path_list = [] for x in range(0, 10): name = random.choice(os.listdir('./datasets/coco_extract/images')) print(str(name)) name = './datasets/coco_extract/images/' + name image = Image.open(name) img_path_list.append(name) images.append(image) image_iterator = paginator("Select a sunset page", images) indices_on_page, images_on_page = map(list, zip(*image_iterator)) st.image(images_on_page, width=200, caption=indices_on_page) pick_img = st.sidebar.selectbox("Which image?", [x for x in range(1, len(images))]) imp_path = img_path_list[int(pick_img)] print("chosen image ", imp_path) image = Image.open(imp_path) st.header("Selected Image") st.image(image) img_id = imp_path[44:-4] for ix in range(len(img_id)): if img_id[ix] != "0": img_id = img_id[ix:] print(img_id) break q_list = [] if button == "With predefined questons": pass elif button == "With custom question": question = st.sidebar.text_input("What is your question?") q_list = [{ "image_id": int(img_id), "question": question, "question_id": 1 }] start_eval = st.sidebar.button('Get the answer!') if start_eval: print('Loading testing set ........') dataset = DataSet(__C, q_list, imp_path[31:], img_id) eval(__C, dataset, valid=True)
    parser.add_argument('--DATA_PATH', dest='DATASET_PATH',
                        help='vqav2 dataset root path', type=str)
    parser.add_argument('--FEAT_PATH', dest='FEATURE_PATH',
                        help='bottom-up features root path', type=str)

    args = parser.parse_args()
    return args


if __name__ == '__main__':
    # Build the configuration from the YAML file and command-line overrides.
    __C = Cfgs()
    args = parse_args()
    args_dict = __C.parse_to_dict(args)

    cfg_file = "cfgs/{}_model.yml".format(args.MODEL)
    with open(cfg_file, 'r') as f:
        yaml_dict = yaml.safe_load(f)

    args_dict = {**yaml_dict, **args_dict}
    __C.add_args(args_dict)
    __C.proc()

    print('Hyper Parameters:')
    print(__C)
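# Illustration (assumption, not project code): in the merge above the keys
# from `args_dict` overwrite the keys from `yaml_dict`, so command-line
# flags take precedence over the YAML defaults. The BATCH_SIZE key below is
# made up for the example; DATASET_PATH mirrors the --DATA_PATH flag.
def _demo_config_merge():
    yaml_dict = {'BATCH_SIZE': 64, 'DATASET_PATH': './datasets/vqa/'}
    args_dict = {'DATASET_PATH': '/data/vqav2/'}  # e.g. parsed from --DATA_PATH
    merged = {**yaml_dict, **args_dict}
    assert merged == {'BATCH_SIZE': 64, 'DATASET_PATH': '/data/vqav2/'}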
def json_store(path, file):
    """Merge `file` into the JSON object stored at `path` and write it back."""
    load_f = json.load(open(path, 'r', encoding='utf-8'))
    with open(path, 'w') as f:
        merge_file = {**load_f, **file}
        f.write(json.dumps(merge_file, indent=4))


def init_json(path_list):
    """Reset every file in `path_list` to an empty JSON object."""
    init = {}
    for path in path_list:
        with open(path, 'w') as f:
            f.write(json.dumps(init, indent=4))


__C = Cfgs()
split_list = ['val', 'test']
stat_ques_list = []
path_list = ['datasets/vqa2db/ques_entities.json',
             'datasets/vqa2db/failed_ques.json']
# init_json(path_list)

# Collect the question lists of the selected splits.
for i in split_list:
    stat_ques_list += json.load(open(__C.QUESTION_PATH[i], 'r'))['questions']

# stat_ques_list = \
#     json.load(open(__C.QUESTION_PATH['train'], 'r'))['questions'] + \
#     json.load(open(__C.QUESTION_PATH['val'], 'r'))['questions'] + \
#     json.load(open(__C.QUESTION_PATH['test'], 'r'))['questions'] + \
#     json.load(open(__C.QUESTION_PATH['vg'], 'r'))['questions']
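# Usage sketch for the helpers above (illustrative only; the record content
# is made up): reset both JSON stores declared in `path_list`, then merge a
# single question record into the entity store.
def _demo_json_store():
    init_json(path_list)  # both files now contain '{}'
    json_store('datasets/vqa2db/ques_entities.json',
               {'1': {'question': 'What color is the cat?',
                      'entities': ['cat']}})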